// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_req_issue_op - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/* If we just extended the file size, any portion not in the
	 * cache won't be on the server and will read as zeroes. */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_rreq - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

	refcount_inc(&fid->count);
	rreq->netfs_priv = fid;
}

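/*
 * Note on fid lifetime: v9fs_init_rreq() takes an extra reference on the
 * file's fid so that the read request can outlive the file descriptor; the
 * matching clunk is done in v9fs_req_cleanup() below once netfs has
 * finished with the request.
 */
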
/**
 * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
 * @mapping: unused mapping of request to cleanup
 * @priv: private data to cleanup, a fid, guaranteed non-null.
 */
static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
{
	struct p9_fid *fid = priv;

	p9_client_clunk(fid);
}

/**
 * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
 * @inode: The inode to check
 */
static bool v9fs_is_cache_enabled(struct inode *inode)
{
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));

	return fscache_cookie_enabled(cookie) && cookie->cache_priv;
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
	return -ENOBUFS;
#endif
}

static const struct netfs_read_request_ops v9fs_req_ops = {
	.init_rreq = v9fs_init_rreq,
	.is_cache_enabled = v9fs_is_cache_enabled,
	.begin_cache_operation = v9fs_begin_cache_operation,
	.issue_op = v9fs_req_issue_op,
	.cleanup = v9fs_req_cleanup,
};

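/*
 * The read entry points below are thin wrappers: request splitting and
 * cache interrogation are delegated to the netfs helper library, which
 * calls back into v9fs_req_ops above to issue the actual 9P reads.
 */
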
/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 * @file: file being read
 * @page: the page to read into
 */
static int v9fs_vfs_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);

	return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
}

/**
 * v9fs_vfs_readahead - read a set of pages from 9P
 * @ractl: The readahead parameters
 */
static void v9fs_vfs_readahead(struct readahead_control *ractl)
{
	netfs_readahead(ractl, &v9fs_req_ops, NULL);
}

/**
 * v9fs_release_page - release the private state associated with a page
 * @page: The page to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns 1 if the page can be released, 0 otherwise.
 */
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio_inode(folio);

	if (folio_test_private(folio))
		return 0;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return 0;
		folio_wait_fscache(folio);
	}
#endif
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
	return 1;
}

static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	folio_wait_fscache(folio);
}

static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

	/* If writing to the cache failed, invalidate the cached data so
	 * that we don't serve stale contents on a later read. */
	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->vfs_inode), 0);
	}
}

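/*
 * Write a locked folio back to the server over the inode's writeback fid,
 * clamping the I/O to the current file size and, if the inode is being
 * cached, mirroring the data to fscache once the server write succeeds.
 */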
static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}

	folio_end_writeback(folio);
	return err;
}

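/*
 * ->writepage(): push a single dirty folio to the server.  -EAGAIN from
 * the transport is not fatal here; the folio is simply redirtied so the
 * VM will retry it on a later writeback pass.
 */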
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 * @page: The page to be cleaned up
 *
 * Returns 0 on success.
 */
static int v9fs_launder_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

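/*
 * Buffered write path: ->write_begin() pins and prepares a folio (using
 * netfs to pre-read any part of it that the copy won't fully overwrite)
 * and ->write_end() publishes the copied data and updates i_size.
 */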
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	BUG_ON(!v9inode->writeback_fid);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
				   &v9fs_req_ops, NULL);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		/* A short copy into a non-uptodate folio can't be committed:
		 * report nothing copied so the caller retries the copy. */
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

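/*
 * Without fscache there is nothing extra to track when a page is dirtied,
 * so the generic __set_page_dirty_nobuffers() helper suffices; with
 * fscache we must also pin the cache object, hence the wrapper below.
 */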
#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We
 * also need to pin the cache object to write back to.
 */
static int v9fs_set_page_dirty(struct page *page)
{
	struct v9fs_inode *v9inode = V9FS_I(page->mapping->host);

	return fscache_set_page_dirty(page, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_set_page_dirty __set_page_dirty_nobuffers
#endif

const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readahead = v9fs_vfs_readahead,
	.set_page_dirty = v9fs_set_page_dirty,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidate_folio = v9fs_invalidate_folio,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};