xref: /openbmc/linux/fs/9p/vfs_addr.c (revision 4b33b5ff)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len   - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/*
	 * If we just extended the file size, any portion not in cache won't
	 * be on the server and is zeroes.
	 */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_request - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

	refcount_inc(&fid->count);
	rreq->netfs_priv = fid;
	return 0;
}

/**
 * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_request
 * @mapping: unused mapping of request to cleanup
 * @priv: private data to cleanup, a fid, guaranteed non-null.
 */
static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
{
	struct p9_fid *fid = priv;

	p9_client_clunk(fid);
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
	return -ENOBUFS;
#endif
}

const struct netfs_request_ops v9fs_req_ops = {
	.init_request		= v9fs_init_request,
	.begin_cache_operation	= v9fs_begin_cache_operation,
	.issue_read		= v9fs_issue_read,
	.cleanup		= v9fs_req_cleanup,
};

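/*
 * The netfs helper library drives the ops above on behalf of the
 * netfs_readpage() and netfs_readahead() entry points used in
 * v9fs_addr_operations below: ->init_request() when a read request is
 * created, ->begin_cache_operation() to open the cache, ->issue_read() for
 * each slice of the request that must be fetched from the server, and
 * ->cleanup() when the request is torn down.
 */
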
/**
 * v9fs_release_page - release the private state associated with a page
 * @page: The page to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns 1 if the page can be released, 0 otherwise.
 */
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio_inode(folio);

	if (folio_test_private(folio))
		return 0;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return 0;
		folio_wait_fscache(folio);
	}
#endif
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
	return 1;
}

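/**
 * v9fs_invalidate_folio - Invalidate part or all of a folio
 * @folio: The folio being invalidated
 * @offset: Offset of the start of the invalidated range
 * @length: Length of the range being invalidated
 *
 * Before the folio contents are discarded, wait for any write to the cache
 * that is still in flight over this folio to complete.
 */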
static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	folio_wait_fscache(folio);
}

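/**
 * v9fs_write_to_cache_done - Completion of a write to the cache
 * @priv: The v9fs inode the data was written back for
 * @transferred_or_error: Byte count transferred or a negative error code
 * @was_async: True if the completion is running asynchronously
 *
 * If the write to the cache failed with anything other than -ENOBUFS, the
 * cache cookie is invalidated against the current qid version so that stale
 * data cannot be read back from the cache later.
 */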
static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->vfs_inode), 0);
	}
}

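/**
 * v9fs_vfs_write_folio_locked - Write a dirty folio back to the server
 * @folio: The locked folio to write out
 *
 * Send the folio contents (clamped to the current file size) to the server
 * over the writeback fid and, if the inode is being cached, also schedule a
 * copy into the cache.  Returns 0 on success or a negative error code.
 */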
static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}

	folio_end_writeback(folio);
	return err;
}

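/**
 * v9fs_vfs_writepage - Write back a dirty page
 * @page: The page to write
 * @wbc: Controls for the writeback
 *
 * -EAGAIN from the server is not treated as an error; the folio is simply
 * redirtied so the write will be retried later.
 */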
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else {
		retval = 0;
	}

	folio_unlock(folio);
	return retval;
}

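/**
 * v9fs_launder_folio - Clean a dirty folio before it is invalidated
 * @folio: The folio to launder
 *
 * Write the folio back to the server if it is dirty, then wait for any
 * write to the cache that is still in progress over it.
 */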
static int v9fs_launder_folio(struct folio *folio)
{
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode. Hence when this routine
 * is called through generic_file_read_iter(), the read/write fails with an
 * error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

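/**
 * v9fs_write_begin - Prepare a folio for a buffered write
 * @filp: The file being written to
 * @mapping: The address space of the target inode
 * @pos: Position at which the write will start
 * @len: Length of the write
 * @flags: AOP_* flags from the caller
 * @subpagep: Out: the locked page covering @pos
 * @fsdata: Out: opaque data handed back to v9fs_write_end()
 *
 * Ask the netfs library to lock the folio and pre-read any part of it that
 * the write will not completely overwrite, pulling the data from the server
 * and/or the cache as needed.
 */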
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	BUG_ON(!v9inode->writeback_fid);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}

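/**
 * v9fs_write_end - Finalise a buffered write
 * @filp: The file being written to
 * @mapping: The address space of the target inode
 * @pos: Position at which the write started
 * @len: Length of the write requested
 * @copied: Number of bytes actually copied into the folio
 * @subpage: The page handed out by v9fs_write_begin()
 * @fsdata: Opaque data from v9fs_write_begin()
 *
 * Mark the folio up to date and dirty, extend the recorded file size if the
 * write went past it, and drop the folio lock and reference taken at
 * write_begin time.
 */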
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_dirty_folio filemap_dirty_folio
#endif

const struct address_space_operations v9fs_addr_operations = {
	.readpage = netfs_readpage,
	.readahead = netfs_readahead,
	.dirty_folio = v9fs_dirty_folio,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidate_folio = v9fs_invalidate_folio,
	.launder_folio = v9fs_launder_folio,
	.direct_IO = v9fs_direct_IO,
};
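
/*
 * These address_space operations are installed on the i_mapping of regular
 * 9p inodes when the inode is set up (v9fs_init_inode() in vfs_inode.c in
 * this tree), at which point the buffered read, write and writeback paths
 * above take effect.
 */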