xref: /openbmc/linux/fs/9p/vfs_addr.c (revision 0d456bad)
/*
 *  linux/fs/9p/vfs_addr.c
 *
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_fid_readpage - read an entire page in from 9P
 *
 * @fid: fid being read
 * @page: page to read data into
 *
 */
static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
{
	int retval;
	loff_t offset;
	char *buffer;
	struct inode *inode;

	inode = page->mapping->host;
	p9_debug(P9_DEBUG_VFS, "\n");

	BUG_ON(!PageLocked(page));

	/* if fscache can satisfy the read, skip the 9P round trip */
	retval = v9fs_readpage_from_fscache(inode, page);
	if (retval == 0)
		return retval;

	buffer = kmap(page);
	offset = page_offset(page);

	retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
	if (retval < 0) {
		v9fs_uncache_page(inode, page);
		goto done;
	}

	/* zero out the tail of the page after a short read */
	memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval);
	flush_dcache_page(page);
	SetPageUptodate(page);

	v9fs_readpage_to_fscache(inode, page);
	retval = 0;

done:
	kunmap(page);
	unlock_page(page);
	return retval;
}

/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 *
 * @filp: file being read
 * @page: page to read data into
 *
 */

static int v9fs_vfs_readpage(struct file *filp, struct page *page)
{
	return v9fs_fid_readpage(filp->private_data, page);
}

/**
 * v9fs_vfs_readpages - read a set of pages from 9P
 *
 * @filp: file being read
 * @mapping: the address space
 * @pages: list of pages to read
 * @nr_pages: count of pages to read
 *
 */

static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	int ret = 0;
	struct inode *inode;

	inode = mapping->host;
	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);

	ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
	if (ret == 0)
		return ret;

	ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
	p9_debug(P9_DEBUG_VFS, "  = %d\n", ret);
	return ret;
}

/**
 * v9fs_release_page - release the private state associated with a page
 * @page: the page to release
 * @gfp: allocation flags passed through to fscache
 *
 * Returns 1 if the page can be released, 0 otherwise.
 */

static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;
	return v9fs_fscache_release_page(page, gfp);
}

/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 *
 * @page: the page to invalidate
 * @offset: offset in the page
 */

static void v9fs_invalidate_page(struct page *page, unsigned long offset)
{
	/*
	 * If called with zero offset, we should release
	 * the private state associated with the page
	 */
	if (offset == 0)
		v9fs_fscache_invalidate_page(page);
}

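/**
 * v9fs_vfs_writepage_locked - write a single locked page back to the server
 * @page: the page to write
 *
 * Writes the page contents through the inode's writeback_fid, using a
 * kernel buffer (hence the temporary set_fs(get_ds())).  The caller holds
 * the page lock; this helper only sets and clears the writeback flag.
 */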
static int v9fs_vfs_writepage_locked(struct page *page)
{
	char *buffer;
	int retval, len;
	loff_t offset, size;
	mm_segment_t old_fs;
	struct v9fs_inode *v9inode;
	struct inode *inode = page->mapping->host;

	v9inode = V9FS_I(inode);
	size = i_size_read(inode);
	/* the page containing EOF is only partially backed by the file */
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	set_page_writeback(page);

	buffer = kmap(page);
	offset = page_offset(page);

	old_fs = get_fs();
	set_fs(get_ds());
	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	retval = v9fs_file_write_internal(inode,
					  v9inode->writeback_fid,
					  (__force const char __user *)buffer,
					  len, &offset, 0);
	if (retval > 0)
		retval = 0;

	set_fs(old_fs);
	kunmap(page);
	end_page_writeback(page);
	return retval;
}

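/**
 * v9fs_vfs_writepage - write a dirty page back to the server
 * @page: the page to write
 * @wbc: writeback control
 *
 * An -EAGAIN result from the locked helper redirties the page for a later
 * attempt; any other error is recorded against the mapping.  The page is
 * unlocked in all cases.
 */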
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = v9fs_vfs_writepage_locked(page);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			redirty_page_for_writepage(wbc, page);
			retval = 0;
		} else {
			SetPageError(page);
			mapping_set_error(page->mapping, retval);
		}
	} else
		retval = 0;

	unlock_page(page);
	return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 * @page: the page to write back
 *
 * Returns 0 on success.
 */

static int v9fs_launder_page(struct page *page)
{
	int retval;
	struct inode *inode = page->mapping->host;

	v9fs_fscache_wait_on_page_write(inode, page);
	if (clear_page_dirty_for_io(page)) {
		retval = v9fs_vfs_writepage_locked(page);
		if (retval)
			return retval;
	}
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 *
 */
static ssize_t
v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	       loff_t pos, unsigned long nr_segs)
{
	/*
	 * FIXME
	 * Now that we do caching with cache mode enabled, we need
	 * to support direct IO
	 */
	p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n",
		 iocb->ki_filp->f_path.dentry->d_name.name,
		 (long long)pos, nr_segs);

	return -EINVAL;
}

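/**
 * v9fs_write_begin - prepare a page for a buffered write
 * @filp: file being written
 * @mapping: the address space
 * @pos: file position of the write
 * @len: number of bytes to be written
 * @flags: AOP flags
 * @pagep: returns the locked page to copy data into
 * @fsdata: opaque data for write_end (unused here)
 *
 * If the page is not already up to date and only part of it will be
 * overwritten, its current contents are read in first through the
 * writeback fid.
 */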
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int retval = 0;
	struct page *page;
	struct v9fs_inode *v9inode;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = mapping->host;

	v9inode = V9FS_I(inode);
start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		retval = -ENOMEM;
		goto out;
	}
	BUG_ON(!v9inode->writeback_fid);
	if (PageUptodate(page))
		goto out;

	/* a full-page write does not need the old contents */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
	page_cache_release(page);
	if (!retval)
		goto start;
out:
	*pagep = page;
	return retval;
}

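/**
 * v9fs_write_end - finish a buffered write
 * @filp: file being written
 * @mapping: the address space
 * @pos: file position of the write
 * @len: number of bytes requested
 * @copied: number of bytes actually copied in
 * @page: the page written to
 * @fsdata: opaque data from write_begin (unused here)
 *
 * Zeroes any uncopied tail of the request, updates i_size if the file
 * grew, and marks the page dirty for later writeback.
 */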
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct inode *inode = page->mapping->host;

	if (unlikely(copied < len)) {
		/*
		 * zero out the rest of the area
		 */
		unsigned from = pos & (PAGE_CACHE_SIZE - 1);

		zero_user(page, from + copied, len - copied);
		flush_dcache_page(page);
	}

	if (!PageUptodate(page))
		SetPageUptodate(page);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}


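/*
 * Address space operations for v9fs files: this table hooks the routines
 * above into the generic page cache, writeback and direct I/O paths.
 */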
const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readpages = v9fs_vfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidatepage = v9fs_invalidate_page,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};