// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_req_issue_op - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len   - subreq->transferred;
	int total, err;

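	/* Map the iterator over the pagecache pages (held in the mapping's
	 * xarray) that cover [pos, pos + len), so the 9P client reads
	 * directly into them.
	 */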
	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/* If we just extended the file size, any portion not in
	 * cache won't be on the server and is zeros */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_rreq - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

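	/* Take a reference on the fid so that it stays valid for the
	 * duration of the (possibly asynchronous) read; it is dropped by
	 * v9fs_req_cleanup() when the request is torn down.
	 */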
	refcount_inc(&fid->count);
	rreq->netfs_priv = fid;
}

/**
 * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
 * @mapping: unused mapping of request to cleanup
 * @priv: private data to cleanup, a fid, guaranteed non-null.
 */
static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
{
	struct p9_fid *fid = priv;

	p9_client_clunk(fid);
}

/**
 * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
 * @inode: The inode to check
 */
static bool v9fs_is_cache_enabled(struct inode *inode)
{
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));

	return fscache_cookie_enabled(cookie) && cookie->cache_priv;
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
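	/* Without CONFIG_9P_FSCACHE there is no local cache; -ENOBUFS tells
	 * netfs to fetch everything from the server instead.
	 */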
	return -ENOBUFS;
#endif
}

static const struct netfs_read_request_ops v9fs_req_ops = {
	.init_rreq		= v9fs_init_rreq,
	.is_cache_enabled	= v9fs_is_cache_enabled,
	.begin_cache_operation	= v9fs_begin_cache_operation,
	.issue_op		= v9fs_req_issue_op,
	.cleanup		= v9fs_req_cleanup,
};

/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 * @file: file being read
 * @page: the page to read into
 */
static int v9fs_vfs_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);

	return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
}

/**
 * v9fs_vfs_readahead - read a set of pages from 9P
 * @ractl: The readahead parameters
 */
static void v9fs_vfs_readahead(struct readahead_control *ractl)
{
	netfs_readahead(ractl, &v9fs_req_ops, NULL);
}

/**
 * v9fs_release_page - release the private state associated with a page
 * @page: The page to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns 1 if the page can be released, 0 otherwise.
 */
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio_inode(folio);

	if (folio_test_private(folio))
		return 0;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
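		/* The folio is being written to the cache.  Waiting for that
		 * may block on I/O, which we may not do from kswapd or from
		 * reclaim contexts that lack __GFP_FS, so refuse the release
		 * in those cases.
		 */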
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return 0;
		folio_wait_fscache(folio);
	}
#endif
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
	return 1;
}

/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 * @page: The page to be invalidated
 * @offset: offset of the invalidated region
 * @length: length of the invalidated region
 */
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct folio *folio = page_folio(page);

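	/* Let any in-progress write of this folio to the cache finish
	 * before its contents are invalidated.
	 */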
	folio_wait_fscache(folio);
}

static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;
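
	/* If the write to the cache failed for a reason other than the cache
	 * being unavailable (-ENOBUFS), the cached copy can no longer be
	 * trusted, so invalidate it.
	 */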
	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->vfs_inode), 0);
	}
}

static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

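	/* Wait for any already-queued write of this folio to the cache to
	 * finish before beginning a new writeback cycle.
	 */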
	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

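	/* If the server write succeeded and the cookie is actively caching,
	 * also copy the data into fscache; v9fs_write_to_cache_done() runs
	 * when that asynchronous write completes.
	 */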
	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}

	folio_end_writeback(folio);
	return err;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
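			/* Transient failure: keep the folio dirty so a later
			 * writeback pass retries it.
			 */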
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else {
		retval = 0;
	}

	folio_unlock(folio);
	return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 * @page: The page to be cleaned up
 *
 * Returns 0 on success.
 */
static int v9fs_launder_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int retval;

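	/* If the folio is dirty, write it back to the server synchronously,
	 * then wait for any outstanding write to the cache so the page is
	 * completely clean before it is dropped.
	 */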
	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write
 * fails with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

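	/* Reads and writes go synchronously to the server through the 9P
	 * client; a write that extends the file also accounts the extra
	 * bytes against the inode.
	 */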
	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	BUG_ON(!v9inode->writeback_fid);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
				   &v9fs_req_ops, NULL);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
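		/* A short copy into a folio that is not uptodate cannot be
		 * committed: the remainder of the folio holds undefined
		 * data.  Return 0 so the caller repeats the copy.
		 */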
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
static int v9fs_set_page_dirty(struct page *page)
{
	struct v9fs_inode *v9inode = V9FS_I(page->mapping->host);

	return fscache_set_page_dirty(page, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_set_page_dirty __set_page_dirty_nobuffers
#endif

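/*
 * The address_space operations for v9fs files; this table is installed on
 * inode->i_mapping when a regular-file inode is set up.
 */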
const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readahead = v9fs_vfs_readahead,
	.set_page_dirty = v9fs_set_page_dirty,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidatepage = v9fs_invalidate_page,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};