// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * Handle completion of a read operation to fill a page.
 */
static void afs_fill_hole(struct afs_read *req)
{
	if (iov_iter_count(req->iter) > 0)
		/* The read was short - clear the excess buffer. */
		iov_iter_zero(iov_iter_count(req->iter), req->iter);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct file *file,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(sizeof(struct afs_read), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->vnode = vnode;
	req->done = afs_fill_hole;
	req->key = key_get(afs_file_key(file));
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->iter = &req->def_iter;
	iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, pos, len);

	ret = afs_fetch_data(vnode, req);
	afs_put_read(req);
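	/* The server aborting the fetch with ENOENT means the file has been
	 * deleted remotely, so mark the vnode accordingly and fail the write
	 * preparation with ESTALE.
	 */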
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(file, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

#ifdef CONFIG_AFS_FSCACHE
	wait_on_page_fscache(page);
#endif

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		ASSERTCMP(f, <=, t);
	}

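	/* Only one dirty byte range per page is tracked in page->private, so
	 * if the new write isn't contiguous with the range already recorded,
	 * the existing dirty data has to be flushed to the server first.
	 */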
	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = pos & (PAGE_SIZE - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;
	int ret = 0;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(file, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

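	/* page->private encodes the byte range within the page that is dirty
	 * (see afs_page_dirty()); fold this write into any range already
	 * recorded there.
	 */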
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(page, f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
	} else {
		priv = afs_page_dirty(page, from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, pgoff_t start, pgoff_t last)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct page *page;

	XA_STATE(xas, &mapping->i_pages, start);

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, start, last);

	rcu_read_lock();

	xas_for_each(&xas, page, last) {
		ASSERT(PageWriteback(page));

		detach_page_private(page);
		trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
		page_endio(page, true, 0);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter,
			  loff_t pos, pgoff_t first, pgoff_t last,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

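	/* Writeback must be authenticated with one of the keys that was used
	 * to author the data; try each key cached on the vnode in turn until
	 * the server accepts one.
	 */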
	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.first = first;
	op->store.last = last;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 pgoff_t start,
				 pgoff_t final_page,
				 unsigned *_offset,
				 unsigned *_to,
				 bool new_content)
{
	struct page *pages[8], *page;
	unsigned long count = *_count, priv;
	unsigned offset = *_offset, to = *_to, n, f, t;
	int loop;

	start++;
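	/* Scan forward in batches of up to eight pages, growing the region
	 * until we meet a page that can't be locked without blocking, isn't
	 * dirty or doesn't continue the run, or until 65536 pages have been
	 * accumulated.
	 */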
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE && !new_content)
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
			if (f != 0 && !new_content) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	*_count = count;
	*_offset = offset;
	*_to = to;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long count, priv;
	unsigned offset, to;
	pgoff_t start, first, last;
	loff_t i_size, pos, end;
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	int ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = afs_page_dirty_from(primary_page, priv);
	to = afs_page_dirty_to(primary_page, priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);

	if (start < final_page &&
	    (to == PAGE_SIZE || new_content))
		afs_extend_writeback(mapping, vnode, &count, start, final_page,
				     &offset, &to, new_content);

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;
	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

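	/* Convert the page range and the intra-page offsets into the byte
	 * range that actually needs storing to the server.
	 */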
	pos = first;
	pos <<= PAGE_SHIFT;
	pos += offset;
	end = last;
	end <<= PAGE_SHIFT;
	end += to;

	/* Trim the actual write down to the EOF */
	i_size = i_size_read(&vnode->vfs_inode);
	if (end > i_size)
		end = i_size;

	if (pos < i_size) {
		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, pos, end - pos);
		ret = afs_store_data(vnode, &iter, pos, first, last, false);
	} else {
		/* The dirty region was entirely beyond the EOF. */
		ret = 0;
	}

	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
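		/* afs_write_back_from_locked_page() unlocked the page, so only
		 * the reference taken by the tag lookup needs dropping here.
		 */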
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_fscache) < 0)
		return VM_FAULT_RETRY;
#endif

	if (wait_on_page_writeback_killable(vmf->page))
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = afs_page_dirty(vmf->page, 0, PAGE_SIZE);
	priv = afs_page_dirty_mmapped(priv);
	if (PagePrivate(vmf->page))
		set_page_private(vmf->page, priv);
	else
		attach_page_private(vmf->page, (void *)priv);
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), vmf->page);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  The caller must hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
		}

		bv[0].bv_page = page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
		ret = afs_store_data(vnode, &iter, (loff_t)page->index << PAGE_SHIFT,
				     page->index, page->index, true);
	}

	detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
	wait_on_page_fscache(page);
	return ret;
}