xref: /openbmc/linux/fs/9p/vfs_addr.c (revision c4a11bf4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file contains vfs address (mmap) ops for 9P2000.
4  *
5  *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
6  *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/errno.h>
11 #include <linux/fs.h>
12 #include <linux/file.h>
13 #include <linux/stat.h>
14 #include <linux/string.h>
15 #include <linux/inet.h>
16 #include <linux/pagemap.h>
17 #include <linux/idr.h>
18 #include <linux/sched.h>
19 #include <linux/uio.h>
20 #include <linux/netfs.h>
21 #include <net/9p/9p.h>
22 #include <net/9p/client.h>
23 
24 #include "v9fs.h"
25 #include "v9fs_vfs.h"
26 #include "cache.h"
27 #include "fid.h"
28 
29 /**
30  * v9fs_req_issue_op - Issue a read from 9P
31  * @subreq: The read to make
32  */
33 static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
34 {
35 	struct netfs_read_request *rreq = subreq->rreq;
36 	struct p9_fid *fid = rreq->netfs_priv;
37 	struct iov_iter to;
38 	loff_t pos = subreq->start + subreq->transferred;
39 	size_t len = subreq->len   - subreq->transferred;
40 	int total, err;
41 
42 	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);
43 
44 	total = p9_client_read(fid, pos, &to, &err);
45 	netfs_subreq_terminated(subreq, err ?: total, false);
46 }
47 
48 /**
49  * v9fs_init_rreq - Initialise a read request
50  * @rreq: The read request
51  * @file: The file being read from
52  */
53 static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
54 {
55 	struct p9_fid *fid = file->private_data;
56 
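	/* Take a reference on the fid for the duration of the read; it is
	 * put again by p9_client_clunk() in v9fs_req_cleanup().
	 */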
57 	refcount_inc(&fid->count);
58 	rreq->netfs_priv = fid;
59 }
60 
61 /**
62  * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
63  * @mapping: unused mapping of request to cleanup
64  * @priv: private data to cleanup, a fid, guaranteed non-null.
65  */
66 static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
67 {
68 	struct p9_fid *fid = priv;
69 
70 	p9_client_clunk(fid);
71 }
72 
73 /**
74  * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
75  * @inode: The inode to check
76  */
77 static bool v9fs_is_cache_enabled(struct inode *inode)
78 {
79 	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));
80 
81 	return fscache_cookie_enabled(cookie) && !hlist_empty(&cookie->backing_objects);
82 }
83 
84 /**
85  * v9fs_begin_cache_operation - Begin a cache operation for a read
86  * @rreq: The read request
87  */
88 static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
89 {
90 	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));
91 
92 	return fscache_begin_read_operation(rreq, cookie);
93 }
94 
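/* Operations handed to the netfs library to drive buffered reads and readahead. */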
95 static const struct netfs_read_request_ops v9fs_req_ops = {
96 	.init_rreq		= v9fs_init_rreq,
97 	.is_cache_enabled	= v9fs_is_cache_enabled,
98 	.begin_cache_operation	= v9fs_begin_cache_operation,
99 	.issue_op		= v9fs_req_issue_op,
100 	.cleanup		= v9fs_req_cleanup,
101 };
102 
103 /**
104  * v9fs_vfs_readpage - read an entire page in from 9P
105  * @file: file being read
106  * @page: the page to read into
107  *
108  */
109 static int v9fs_vfs_readpage(struct file *file, struct page *page)
110 {
111 	return netfs_readpage(file, page, &v9fs_req_ops, NULL);
112 }
113 
114 /**
115  * v9fs_vfs_readahead - read a set of pages from 9P
116  * @ractl: The readahead parameters
117  */
118 static void v9fs_vfs_readahead(struct readahead_control *ractl)
119 {
120 	netfs_readahead(ractl, &v9fs_req_ops, NULL);
121 }
122 
123 /**
124  * v9fs_release_page - release the private state associated with a page
125  * @page: The page to be released
126  * @gfp: The caller's allocation restrictions
127  *
128  * Returns 1 if the page can be released, 0 otherwise.
129  */
130 
131 static int v9fs_release_page(struct page *page, gfp_t gfp)
132 {
133 	if (PagePrivate(page))
134 		return 0;
135 #ifdef CONFIG_9P_FSCACHE
136 	if (PageFsCache(page)) {
137 		if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
138 			return 0;
139 		wait_on_page_fscache(page);
140 	}
141 #endif
142 	return 1;
143 }
144 
145 /**
146  * v9fs_invalidate_page - Invalidate a page completely or partially
147  * @page: The page to be invalidated
148  * @offset: offset of the invalidated region
149  * @length: length of the invalidated region
150  */
151 
152 static void v9fs_invalidate_page(struct page *page, unsigned int offset,
153 				 unsigned int length)
154 {
155 	wait_on_page_fscache(page);
156 }
157 
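/**
 * v9fs_vfs_writepage_locked - Write a locked page back to the server
 * @page: The page to write, locked by the caller
 *
 * Writes the page contents through the inode's writeback fid.  Returns 0 on
 * success or a negative error code from p9_client_write().
 */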
158 static int v9fs_vfs_writepage_locked(struct page *page)
159 {
160 	struct inode *inode = page->mapping->host;
161 	struct v9fs_inode *v9inode = V9FS_I(inode);
162 	loff_t start = page_offset(page);
163 	loff_t size = i_size_read(inode);
164 	struct iov_iter from;
165 	int err, len;
166 
167 	if (page->index == size >> PAGE_SHIFT)
168 		len = size & ~PAGE_MASK;
169 	else
170 		len = PAGE_SIZE;
171 
172 	iov_iter_xarray(&from, WRITE, &page->mapping->i_pages, start, len);
173 
174 	/* We should always have writeback_fid set */
175 	BUG_ON(!v9inode->writeback_fid);
176 
177 	set_page_writeback(page);
178 
179 	p9_client_write(v9inode->writeback_fid, start, &from, &err);
180 
181 	end_page_writeback(page);
182 	return err;
183 }
184 
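/**
 * v9fs_vfs_writepage - Write back a dirty page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * An -EAGAIN result redirties the page for a later pass; any other error is
 * recorded against the mapping.  The page is unlocked before returning.
 */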
185 static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
186 {
187 	int retval;
188 
189 	p9_debug(P9_DEBUG_VFS, "page %p\n", page);
190 
191 	retval = v9fs_vfs_writepage_locked(page);
192 	if (retval < 0) {
193 		if (retval == -EAGAIN) {
194 			redirty_page_for_writepage(wbc, page);
195 			retval = 0;
196 		} else {
197 			SetPageError(page);
198 			mapping_set_error(page->mapping, retval);
199 		}
200 	} else
201 		retval = 0;
202 
203 	unlock_page(page);
204 	return retval;
205 }
206 
207 /**
208  * v9fs_launder_page - Writeback a dirty page
209  * @page: The page to be cleaned up
210  *
211  * Returns 0 on success.
212  */
213 
214 static int v9fs_launder_page(struct page *page)
215 {
216 	int retval;
217 
218 	if (clear_page_dirty_for_io(page)) {
219 		retval = v9fs_vfs_writepage_locked(page);
220 		if (retval)
221 			return retval;
222 	}
223 	wait_on_page_fscache(page);
224 	return 0;
225 }
226 
227 /**
228  * v9fs_direct_IO - 9P address space operation for direct I/O
229  * @iocb: target I/O control block
230  * @iter: The data/buffer to use
231  *
232  * The presence of v9fs_direct_IO() in the address space ops vector
233  * allows the O_DIRECT flag on open(), which would have failed otherwise.
234  *
235  * In the non-cached mode, we shunt off direct read and write requests before
236  * the VFS gets them, so this method should never be called.
237  *
238  * Direct IO is not yet supported in the cached mode, so when this routine
239  * is called through generic_file_aio_read(), the read/write fails with an
240  * error.
241  *
242  */
243 static ssize_t
244 v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
245 {
246 	struct file *file = iocb->ki_filp;
247 	loff_t pos = iocb->ki_pos;
248 	ssize_t n;
249 	int err = 0;
250 
251 	if (iov_iter_rw(iter) == WRITE) {
252 		n = p9_client_write(file->private_data, pos, iter, &err);
253 		if (n) {
254 			struct inode *inode = file_inode(file);
255 			loff_t i_size = i_size_read(inode);
256 
257 			if (pos + n > i_size)
258 				inode_add_bytes(inode, pos + n - i_size);
259 		}
260 	} else {
261 		n = p9_client_read(file->private_data, pos, iter, &err);
262 	}
263 	return n ? n : err;
264 }
265 
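/**
 * v9fs_write_begin - Prepare a page for a write
 * @filp: The file being written to
 * @mapping: The address space of the file
 * @pos: The file position the write will start at
 * @len: The length of the write
 * @flags: AOP flags
 * @pagep: Where to return the locked page
 * @fsdata: Where to store private data for v9fs_write_end()
 */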
266 static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
267 			    loff_t pos, unsigned int len, unsigned int flags,
268 			    struct page **pagep, void **fsdata)
269 {
270 	int retval;
271 	struct page *page;
272 	struct v9fs_inode *v9inode = V9FS_I(mapping->host);
273 
274 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
275 
276 	BUG_ON(!v9inode->writeback_fid);
277 
278 	/* Prefetch area to be written into the cache if we're caching this
279 	 * file.  We need to do this before we get a lock on the page in case
280 	 * there's more than one writer competing for the same cache block.
281 	 */
282 	retval = netfs_write_begin(filp, mapping, pos, len, flags, &page, fsdata,
283 				   &v9fs_req_ops, NULL);
284 	if (retval < 0)
285 		return retval;
286 
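	/* The page returned by netfs_write_begin() may be a compound page;
	 * hand the caller the subpage that covers @pos.
	 */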
287 	*pagep = find_subpage(page, pos / PAGE_SIZE);
288 	return retval;
289 }
290 
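/**
 * v9fs_write_end - Finish a write to a page
 * @filp: The file being written to
 * @mapping: The address space of the file
 * @pos: The file position the write started at
 * @len: The length that was requested
 * @copied: The number of bytes actually copied in
 * @page: The page written to
 * @fsdata: Private data from v9fs_write_begin()
 *
 * Returns the number of bytes accepted, which may be 0 if a short copy
 * landed on a page that was not up to date.
 */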
291 static int v9fs_write_end(struct file *filp, struct address_space *mapping,
292 			  loff_t pos, unsigned int len, unsigned int copied,
293 			  struct page *page, void *fsdata)
294 {
295 	loff_t last_pos = pos + copied;
296 	struct inode *inode = page->mapping->host;
297 
298 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
299 
300 	if (!PageUptodate(page)) {
301 		if (unlikely(copied < len)) {
302 			copied = 0;
303 			goto out;
304 		}
305 
306 		SetPageUptodate(page);
307 	}
308 
309 	/*
310 	 * No need to use i_size_read() here, the i_size
311 	 * cannot change under us because we hold the i_mutex.
312 	 */
313 	if (last_pos > inode->i_size) {
314 		inode_add_bytes(inode, last_pos - inode->i_size);
315 		i_size_write(inode, last_pos);
316 	}
317 	set_page_dirty(page);
318 out:
319 	unlock_page(page);
320 	put_page(page);
321 
322 	return copied;
323 }
324 
325 
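/* Address space operations for 9p pagecache I/O. */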
326 const struct address_space_operations v9fs_addr_operations = {
327 	.readpage = v9fs_vfs_readpage,
328 	.readahead = v9fs_vfs_readahead,
329 	.set_page_dirty = __set_page_dirty_nobuffers,
330 	.writepage = v9fs_vfs_writepage,
331 	.write_begin = v9fs_write_begin,
332 	.write_end = v9fs_write_end,
333 	.releasepage = v9fs_release_page,
334 	.invalidatepage = v9fs_invalidate_page,
335 	.launder_page = v9fs_launder_page,
336 	.direct_IO = v9fs_direct_IO,
337 };
338