// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains the VFS address-space (mmap) operations for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len   - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

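	/*
	 * Issue a synchronous read to the server; total is the number of
	 * bytes transferred and err holds any error raised along the way.
	 */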
	total = p9_client_read(fid, pos, &to, &err);

	/* if we just extended the file size, any portion not in
	 * cache won't be on the server and reads back as zeroes
	 */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_request - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

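	/*
	 * Pin the fid for the lifetime of the request; the reference is
	 * dropped again by v9fs_req_cleanup().
	 */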
	refcount_inc(&fid->count);
	rreq->netfs_priv = fid;
	return 0;
}

/**
 * v9fs_req_cleanup - Clean up a request initialised by v9fs_init_request
 * @mapping: unused mapping of the request to clean up
 * @priv: private data to clean up, a fid, guaranteed non-NULL
 */
static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
{
	struct p9_fid *fid = priv;

	p9_client_clunk(fid);
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
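	/* Tell netfslib that no local cache is available. */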
	return -ENOBUFS;
#endif
}

const struct netfs_request_ops v9fs_req_ops = {
	.init_request		= v9fs_init_request,
	.begin_cache_operation	= v9fs_begin_cache_operation,
	.issue_read		= v9fs_issue_read,
	.cleanup		= v9fs_req_cleanup,
};

/**
 * v9fs_release_folio - release the private state associated with a folio
 * @folio: The folio to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns true if the folio can be released, false otherwise.
 */
static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct inode *inode = folio_inode(folio);

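	/* A folio that still has private state attached cannot be released. */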
	if (folio_test_private(folio))
		return false;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
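		/*
		 * The folio is still being written to the cache.  Don't
		 * block in kswapd or where filesystem recursion is not
		 * allowed; otherwise wait for the write to finish.
		 */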
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}
#endif
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
	return true;
}

static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
				 size_t length)
{
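	/*
	 * Wait for any in-flight write to the local cache to finish before
	 * the folio contents are invalidated.
	 */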
	folio_wait_fscache(folio);
}

static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

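	/*
	 * If the write to the cache failed for any reason other than a lack
	 * of space, the cached copy may be inconsistent with the server, so
	 * invalidate it against the inode's current version and size.
	 */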
	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->vfs_inode), 0);
	}
}

static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

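	/* Trim the write so that it does not extend past EOF. */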
	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

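	/*
	 * If the write to the server succeeded and the inode is being
	 * cached, copy the data to the local cache as well.
	 */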
	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}

	folio_end_writeback(folio);
	return err;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
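			/* Transient failure: leave the folio dirty so that
			 * writeback will retry it later.
			 */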
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}

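/*
 * Flush a dirty folio that is about to be invalidated: write it back to the
 * server, then wait for any outstanding write to the local cache to finish.
 */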
static int v9fs_launder_folio(struct folio *folio)
{
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would otherwise fail.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.  Hence, when this
 * routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

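			/* Account for bytes written beyond the old EOF. */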
			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	BUG_ON(!v9inode->writeback_fid);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(filp, mapping, pos, len, &folio, fsdata);
	if (retval < 0)
		return retval;

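	/* netfs_write_begin() returned a locked folio; hand its head page
	 * back to the caller.
	 */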
	*subpagep = &folio->page;
	return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
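		/* A short copy into a folio that isn't up to date would
		 * leave a hole, so reject it and let the caller retry.
		 */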
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
}
#else
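/* No local cache: there is no cookie to pin, so use the generic helper. */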
#define v9fs_dirty_folio filemap_dirty_folio
#endif

const struct address_space_operations v9fs_addr_operations = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.dirty_folio = v9fs_dirty_folio,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.release_folio = v9fs_release_folio,
	.invalidate_folio = v9fs_invalidate_folio,
	.launder_folio = v9fs_launder_folio,
	.direct_IO = v9fs_direct_IO,
};
349