/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *page);

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
	struct afs_writeback *front;
	struct afs_vnode *vnode = wb->vnode;

	list_del_init(&wb->link);
	if (!list_empty(&vnode->writebacks)) {
		/* if an fsync rises to the front of the queue then wake it
		 * up */
		front = list_entry(vnode->writebacks.next,
				   struct afs_writeback, link);
		if (front->state == AFS_WBACK_SYNCING) {
			_debug("wake up sync");
			front->state = AFS_WBACK_COMPLETE;
			wake_up(&front->waitq);
		}
	}
}

/*
 * free a writeback record
 * - drops the record's reference on its key before freeing the record itself
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
	_enter("");
	key_put(wb->key);
	kfree(wb);
}

/*
 * dispose of a reference to a writeback record
 * - takes vnode->writeback_lock to decrement wb->usage; if that reaches zero
 *   the record is unlinked under the lock and freed after the lock is dropped
 */
void afs_put_writeback(struct afs_writeback *wb)
{
	struct afs_vnode *vnode = wb->vnode;

	_enter("{%d}", wb->usage);

	spin_lock(&vnode->writeback_lock);
	if (--wb->usage == 0)
		afs_unlink_writeback(wb);
	else
		wb = NULL;	/* still referenced elsewhere: nothing to free */
	spin_unlock(&vnode->writeback_lock);
	if (wb)
		afs_free_writeback(wb);
}
/*
 * partly or wholly fill a page that's under preparation for writing
 * - builds a single-page afs_read request and fetches the data from the
 *   server; an extra page reference is taken for the request and released
 *   by afs_put_read()
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	atomic_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			/* the server no longer knows the file: mark the vnode
			 * deleted and report staleness to the caller */
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 * - allocates a candidate writeback record up front, then either installs it
 *   on the page, merges the page into an existing PENDING record with the
 *   same key, or flushes a conflicting record and retries
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_writeback *candidate, *wb;
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
	if (!candidate)
		return -ENOMEM;
	candidate->vnode = vnode;
	candidate->first = candidate->last = index;
	candidate->offset_first = from;
	candidate->to_last = to;
	INIT_LIST_HEAD(&candidate->link);
	candidate->usage = 1;
	candidate->state = AFS_WBACK_PENDING;
	init_waitqueue_head(&candidate->waitq);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		kfree(candidate);
		return -ENOMEM;
	}

	/* a partial write to a non-uptodate page needs the rest of the page
	 * read in first so that the unwritten portions are valid */
	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			kfree(candidate);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	spin_lock(&vnode->writeback_lock);

	/* see if this page is already pending a writeback under a suitable key
	 * - if so we can just join onto that one */
	wb = (struct afs_writeback *) page_private(page);
	if (wb) {
		if (wb->key == key && wb->state == AFS_WBACK_PENDING)
			goto subsume_in_current_wb;
		goto flush_conflicting_wb;
	}

	if (index > 0) {
		/* see if we can find an already pending writeback that we can
		 * append this page to */
		list_for_each_entry(wb, &vnode->writebacks, link) {
			if (wb->last == index - 1 && wb->key == key &&
			    wb->state == AFS_WBACK_PENDING)
				goto append_to_previous_wb;
		}
	}

	/* no suitable record: queue the candidate and attach it to the page */
	list_add_tail(&candidate->link, &vnode->writebacks);
	candidate->key = key_get(key);
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) candidate);
	_leave(" = 0 [new]");
	return 0;

subsume_in_current_wb:
	/* the page is already covered by this record; just widen the byte
	 * range at either end if needed */
	_debug("subsume");
	ASSERTRANGE(wb->first, <=, index, <=, wb->last);
	if (index == wb->first && from < wb->offset_first)
		wb->offset_first = from;
	if (index == wb->last && to > wb->to_last)
		wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	kfree(candidate);
	_leave(" = 0 [sub]");
	return 0;

append_to_previous_wb:
	/* extend the adjacent record by one page and bind the page to it */
	_debug("append into %lx-%lx", wb->first, wb->last);
	wb->usage++;
	wb->last++;
	wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) wb);
	kfree(candidate);
	_leave(" = 0 [app]");
	return 0;

	/* the page is currently bound to another context, so if it's dirty we
	 * need to flush it before we can use the new context */
flush_conflicting_wb:
	_debug("flush conflict");
	if (wb->state == AFS_WBACK_PENDING)
		wb->state = AFS_WBACK_CONFLICTING;
	spin_unlock(&vnode->writeback_lock);
	if (clear_page_dirty_for_io(page)) {
		ret = afs_write_back_from_locked_page(wb, page);
		if (ret < 0) {
			afs_put_writeback(candidate);
			_leave(" = %d", ret);
			return ret;
		}
	}

	/* the page holds a ref on the writeback record */
	afs_put_writeback(wb);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	goto try_again;
}

/*
 * finalise part of a write to a page
 * - updates i_size if the write extended the file (double-checked under
 *   writeback_lock), fills in any still-missing data for a short copy,
 *   dirties the page and drops the write_begin lock/reference
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		/* recheck under the lock so concurrent extenders don't
		 * shrink i_size */
		spin_lock(&vnode->writeback_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->writeback_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				return ret;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	unlock_page(page);
	put_page(page);

	return copied;
}

/*
 * kill all the pages in the given range
 * - called after a failed store: clears uptodate, optionally flags the pages
 *   in error, and completes any writeback state on them, a pagevec at a time
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
			   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv, 0);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			if (error)
				SetPageError(page);
			if (PageWriteback(page))
				end_page_writeback(page);
			/* advance the cursor past this page for the next
			 * batch */
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first < last);

	_leave("");
}

/*
 * write to a file
 * - runs the FS.StoreData operation for the given page/byte range under a
 *   fileserver cursor, retrying server selection as the cursor dictates
 */
static int afs_store_data(struct afs_writeback *wb, pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_fs_cursor fc;
	struct afs_vnode *vnode = wb->vnode;
	int ret;

	_enter("%s{%x:%u.%u},%x,%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(wb->key),
	       first, last, offset, to);

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wb->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = vnode->cb_break + vnode->cb_s_break;
			afs_fs_store_data(&fc, wb, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *primary_page)
{
	struct page *pages[8], *page;
	unsigned long count;
	unsigned n, offset, to;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* find all consecutive lockable dirty pages, stopping when we find a
	 * page that is not immediately lockable, is not dirty or is missing,
	 * or we reach the end of the range */
	start = primary_page->index;
	if (start >= wb->last)
		goto no_more;
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = wb->last - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
					  start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			/* a hole in the range: drop the refs we just took and
			 * stop extending */
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (page->index > wb->last)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) ||
			    page_private(page) != (unsigned long) wb) {
				unlock_page(page);
				break;
			}
			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			/* stopped early: release the pages we didn't claim */
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= wb->last && count < 65536);

no_more:
	/* we now have a contiguous set of dirty pages, each with writeback set
	 * and the dirty mark cleared; the first page is locked and must remain
	 * so, all the rest are unlocked */
	first = primary_page->index;
	last = first + count - 1;

	/* only the edge pages of the record use partial-page byte ranges */
	offset = (first == wb->first) ? wb->offset_first : 0;
	to = (last == wb->last) ? wb->to_last : PAGE_SIZE;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(wb, first, last, offset, to);
	if (ret < 0) {
		switch (ret) {
		case -EDQUOT:
		case -ENOSPC:
			mapping_set_error(wb->vnode->vfs_inode.i_mapping, -ENOSPC);
			break;
		case -EROFS:
		case -EIO:
		case -EREMOTEIO:
		case -EFBIG:
		case -ENOENT:
		case -ENOMEDIUM:
		case -ENXIO:
			afs_kill_pages(wb->vnode, true, first, last);
			mapping_set_error(wb->vnode->vfs_inode.i_mapping, -EIO);
			break;
		case -EACCES:
		case -EPERM:
		case -ENOKEY:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EKEYREVOKED:
			/* permission/key failure: discard the pages without
			 * marking them in error */
			afs_kill_pages(wb->vnode, false, first, last);
			break;
		default:
			break;
		}
	} else {
		/* success: report the number of pages written back */
		ret = count;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct afs_writeback *wb;
	int ret;

	_enter("{%lx},", page->index);

	wb = (struct afs_writeback *) page_private(page);
	ASSERT(wb != NULL);

	ret = afs_write_back_from_locked_page(wb, page);
	unlock_page(page);
	if (ret < 0) {
		/* errors are reported via mapping_set_error(), not here */
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 * - walks the dirty tag from index towards end, writing back each dirty page
 *   (and its record's contiguous followers) until nr_to_write is exhausted;
 *   returns the next index to resume from via *_next
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct afs_writeback *wb;
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
				       1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		if (page->index > end) {
			*_next = index;
			put_page(page);
			_leave(" = 0 [%lx]", *_next);
			return 0;
		}

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		lock_page(page);

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		wb = (struct afs_writeback *) page_private(page);
		ASSERT(wb != NULL);

		spin_lock(&wb->vnode->writeback_lock);
		wb->state = AFS_WBACK_WRITING;
		spin_unlock(&wb->vnode->writeback_lock);

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(wb, page);
		unlock_page(page);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 * - handles the three writeback_control range modes: cyclic (wrap around
 *   from writeback_index), whole-file, and an explicit byte range
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			/* wrap round and cover the part before start */
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 * - ends writeback on each page in the stored range, detaches the writeback
 *   record from those pages and drops their references on it, freeing the
 *   record once its usage falls to zero
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct afs_writeback *wb = call->wb;
	struct pagevec pv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;
	bool free_wb;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	ASSERT(wb != NULL);

	pagevec_init(&pv, 0);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(call->mapping, first, count,
					      pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		spin_lock(&vnode->writeback_lock);
		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			end_page_writeback(page);
			if (page_private(page) == (unsigned long) wb) {
				set_page_private(page, 0);
				ClearPagePrivate(page);
				wb->usage--;
			}
		}
		free_wb = false;
		if (wb->usage == 0) {
			afs_unlink_writeback(wb);
			free_wb = true;
		}
		spin_unlock(&vnode->writeback_lock);
		first += count;
		if (free_wb) {
			/* freed outside the lock, as in afs_put_writeback() */
			afs_free_writeback(wb);
			wb = NULL;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to an AFS file
 * - rejects writes to an active swap file, then defers to the generic
 *   write path (which calls back into afs_write_begin/afs_write_end)
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush the vnode to the fileserver
 * - synchronously writes back everything via ->writepages, then re-marks
 *   the inode dirty for pages that may have been redirtied meanwhile
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_cyclic	= 1,
	};
	int ret;

	_enter("");

	ret = mapping->a_ops->writepages(mapping, &wbc);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	_leave(" = %d", ret);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_writeback *wb, *xwb;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	int ret;

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	/* start writeback on the requested range and collect any prior write
	 * errors before taking the inode lock */
	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;
	inode_lock(inode);

	/* use a writeback record as a marker in the queue - when this reaches
	 * the front of the queue, all the outstanding writes are either
	 * completed or rejected */
	wb = kzalloc(sizeof(*wb), GFP_KERNEL);
	if (!wb) {
		ret = -ENOMEM;
		goto out;
	}
	/* first=0,last=-1 appears to make this a pure marker covering no page
	 * range — TODO confirm against afs_writeback users */
	wb->vnode = vnode;
	wb->first = 0;
	wb->last = -1;
	wb->offset_first = 0;
	wb->to_last = PAGE_SIZE;
	wb->usage = 1;
	wb->state = AFS_WBACK_SYNCING;
	init_waitqueue_head(&wb->waitq);

	spin_lock(&vnode->writeback_lock);
	/* any still-pending records must not be merged with writes made after
	 * this point, so flag them as conflicting */
	list_for_each_entry(xwb, &vnode->writebacks, link) {
		if (xwb->state == AFS_WBACK_PENDING)
			xwb->state = AFS_WBACK_CONFLICTING;
	}
	list_add_tail(&wb->link, &vnode->writebacks);
	spin_unlock(&vnode->writeback_lock);

	/* push all the outstanding writebacks to the server */
	ret = afs_writeback_all(vnode);
	if (ret < 0) {
		afs_put_writeback(wb);
		_leave(" = %d [wb]", ret);
		goto out;
	}

	/* wait for the preceding writes to actually complete: the marker is
	 * flipped to AFS_WBACK_COMPLETE (and woken) by afs_unlink_writeback()
	 * when it reaches the front of the queue; may return -ERESTARTSYS if
	 * a signal arrives first */
	ret = wait_event_interruptible(wb->waitq,
				       wb->state == AFS_WBACK_COMPLETE ||
				       vnode->writebacks.next == &wb->link);
	afs_put_writeback(wb);
	_leave(" = %d", ret);
out:
	inode_unlock(inode);
	return ret;
}

/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
80358fed94dSDavid Howells */ 80458fed94dSDavid Howells int afs_flush(struct file *file, fl_owner_t id) 80558fed94dSDavid Howells { 80658fed94dSDavid Howells _enter(""); 80758fed94dSDavid Howells 80858fed94dSDavid Howells if ((file->f_mode & FMODE_WRITE) == 0) 80958fed94dSDavid Howells return 0; 81058fed94dSDavid Howells 81158fed94dSDavid Howells return vfs_fsync(file, 0); 81258fed94dSDavid Howells } 81358fed94dSDavid Howells 81458fed94dSDavid Howells /* 8159b3f26c9SDavid Howells * notification that a previously read-only page is about to become writable 8169b3f26c9SDavid Howells * - if it returns an error, the caller will deliver a bus error signal 8179b3f26c9SDavid Howells */ 8189b3f26c9SDavid Howells int afs_page_mkwrite(struct vm_area_struct *vma, struct page *page) 8199b3f26c9SDavid Howells { 8209b3f26c9SDavid Howells struct afs_vnode *vnode = AFS_FS_I(vma->vm_file->f_mapping->host); 8219b3f26c9SDavid Howells 8229b3f26c9SDavid Howells _enter("{{%x:%u}},{%lx}", 8239b3f26c9SDavid Howells vnode->fid.vid, vnode->fid.vnode, page->index); 8249b3f26c9SDavid Howells 8259b3f26c9SDavid Howells /* wait for the page to be written to the cache before we allow it to 8269b3f26c9SDavid Howells * be modified */ 8279b3f26c9SDavid Howells #ifdef CONFIG_AFS_FSCACHE 8289b3f26c9SDavid Howells fscache_wait_on_page_write(vnode->cache, page); 8299b3f26c9SDavid Howells #endif 8309b3f26c9SDavid Howells 8319b3f26c9SDavid Howells _leave(" = 0"); 8329b3f26c9SDavid Howells return 0; 8339b3f26c9SDavid Howells } 834