// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len   - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/* if we just extended the file size, any portion not in
	 * cache won't be on the server and reads back as zeroes */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_request - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

	BUG_ON(!fid);

	/* we might need to read from a fid that was opened write-only
	 * for read-modify-write of page cache; warn if the fid does
	 * not allow reads */
	WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE &&
			!(fid->mode & P9_ORDWR));

	p9_fid_get(fid);
	rreq->netfs_priv = fid;
	return 0;
}

/**
 * v9fs_free_request - Cleanup request initialized by v9fs_init_request
 * @rreq: The I/O request to clean up
 */
static void v9fs_free_request(struct netfs_io_request *rreq)
{
	struct p9_fid *fid = rreq->netfs_priv;

	p9_fid_put(fid);
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
	return -ENOBUFS;
#endif
}

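/*
 * Request operations handed to the netfs library; buffered reads and
 * readahead (netfs_read_folio/netfs_readahead below) are driven through
 * these hooks.
 */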
const struct netfs_request_ops v9fs_req_ops = {
	.init_request		= v9fs_init_request,
	.free_request		= v9fs_free_request,
	.begin_cache_operation	= v9fs_begin_cache_operation,
	.issue_read		= v9fs_issue_read,
};

/**
 * v9fs_release_folio - release the private state associated with a folio
 * @folio: The folio to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns true if the folio can be released, false otherwise.
 */
static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
{
	if (folio_test_private(folio))
		return false;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio))));
#endif
	return true;
}

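/**
 * v9fs_invalidate_folio - Invalidate part or all of a folio
 * @folio: The folio being invalidated
 * @offset: Start of the range being invalidated
 * @length: Length of the range being invalidated
 *
 * Wait for any in-flight write to the cache backing @folio to finish
 * before the folio contents are thrown away.
 */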
static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
				 size_t length)
{
	folio_wait_fscache(folio);
}

#ifdef CONFIG_9P_FSCACHE
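/*
 * Completion callback for fscache_write_to_cache().  If the write to the
 * cache failed for any reason other than a lack of space (-ENOBUFS), the
 * cached copy can no longer be trusted, so invalidate the cookie using the
 * inode's current qid version and size.
 */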
static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->netfs.inode), 0);
	}
}
#endif

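/*
 * Write a single locked folio back to the server.  The length is clamped
 * to the current file size, the data is sent with p9_client_write() over a
 * fid found via v9fs_fid_find_inode(), and, if fscache is caching this
 * inode, the folio is also copied to the cache.  Returns 0 or a negative
 * errno.
 */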
static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	struct p9_fid *writeback_fid;
	int err;
	struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
	struct fscache_cookie __maybe_unused *cookie = v9fs_inode_cookie(v9inode);

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len);

	writeback_fid = v9fs_fid_find_inode(inode, true, INVALID_UID, true);
	if (!writeback_fid) {
		WARN_ONCE(1, "folio expected an open fid inode->i_private=%p\n",
			inode->i_private);
		return -EINVAL;
	}

	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(writeback_fid, start, &from, &err);

#ifdef CONFIG_9P_FSCACHE
	if (err == 0 &&
		fscache_cookie_enabled(cookie) &&
		test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
					folio_mapping(folio), start, len, i_size,
					v9fs_write_to_cache_done, v9inode,
					true);
	}
#endif

	folio_end_writeback(folio);
	p9_fid_put(writeback_fid);

	return err;
}

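/*
 * ->writepage implementation.  Write the folio back; -EAGAIN from the
 * transport redirties the folio for a later attempt, any other error is
 * recorded against the mapping.  The folio is unlocked before returning.
 */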
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}

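/*
 * ->launder_folio implementation.  If the folio is still dirty, write it
 * back synchronously, then wait for any outstanding write to the cache so
 * the folio can safely be dropped.
 */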
static int v9fs_launder_folio(struct folio *folio)
{
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write
 * fails with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

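/*
 * ->write_begin implementation.  Let netfs_write_begin() find or create
 * the folio and bring it up to date (from the server or the cache) before
 * the caller copies data into it.
 */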
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}

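/*
 * ->write_end implementation.  Mark the folio up to date if the whole of it
 * was written, extend i_size when the write went past the old end of file
 * (updating the fscache cookie to match), then dirty, unlock and release
 * the folio.
 */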
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
#ifdef CONFIG_9P_FSCACHE
		fscache_update_cookie(v9fs_inode_cookie(V9FS_I(inode)), NULL,
			&last_pos);
#endif
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a folio as having been made dirty and thus needing writeback.  We
 * also need to pin the cache object to write back to.
 */
static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_dirty_folio filemap_dirty_folio
#endif

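/*
 * 9p address_space operations.  Buffered reads are driven by the netfs
 * helpers (netfs_read_folio/netfs_readahead) using v9fs_req_ops above;
 * writeback is done folio-by-folio via v9fs_vfs_writepage.
 */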
const struct address_space_operations v9fs_addr_operations = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.dirty_folio = v9fs_dirty_folio,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.release_folio = v9fs_release_folio,
	.invalidate_folio = v9fs_invalidate_folio,
	.launder_folio = v9fs_launder_folio,
	.direct_IO = v9fs_direct_IO,
};