xref: /openbmc/linux/fs/9p/vfs_addr.c (revision 54525552)
/*
 *  linux/fs/9p/vfs_addr.c
 *
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_fid_readpage - read an entire page in from 9P
 *
 * @fid: fid being read
 * @page: page structure to read the data into
 *
 */
static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
{
	int retval;
	loff_t offset;
	char *buffer;
	struct inode *inode;

	inode = page->mapping->host;
	P9_DPRINTK(P9_DEBUG_VFS, "\n");

	BUG_ON(!PageLocked(page));

	retval = v9fs_readpage_from_fscache(inode, page);
	if (retval == 0)
		return retval;

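	/*
	 * The page could not be filled from fscache (or caching is
	 * disabled), so read it from the server using the supplied fid.
	 */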
	buffer = kmap(page);
	offset = page_offset(page);

	retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
	if (retval < 0) {
		v9fs_uncache_page(inode, page);
		goto done;
	}

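	/*
	 * A short read is not an error: zero the rest of the page so the
	 * whole page can be marked up to date.
	 */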
	memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval);
	flush_dcache_page(page);
	SetPageUptodate(page);

	v9fs_readpage_to_fscache(inode, page);
	retval = 0;

done:
	kunmap(page);
	unlock_page(page);
	return retval;
}

/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 *
 * @filp: file being read
 * @page: page structure to read the data into
 *
 */

static int v9fs_vfs_readpage(struct file *filp, struct page *page)
{
	return v9fs_fid_readpage(filp->private_data, page);
}

/**
 * v9fs_vfs_readpages - read a set of pages from 9P
 *
 * @filp: file being read
 * @mapping: the address space
 * @pages: list of pages to read
 * @nr_pages: count of pages to read
 *
 */

static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	int ret = 0;
	struct inode *inode;

	inode = mapping->host;
	P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);

	ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
	if (ret == 0)
		return ret;

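	/*
	 * fscache could not satisfy the request, so read the remaining
	 * pages one at a time through v9fs_vfs_readpage().
	 */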
	ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
	P9_DPRINTK(P9_DEBUG_VFS, "  = %d\n", ret);
	return ret;
}

/**
 * v9fs_release_page - release the private state associated with a page
 * @page: page to release
 * @gfp: the caller's allocation restrictions
 *
 * Returns 1 if the page can be released, 0 otherwise.
 */

static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;
	return v9fs_fscache_release_page(page, gfp);
}

/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 *
 * @page: page to invalidate
 * @offset: offset within the page at which invalidation starts
 */

static void v9fs_invalidate_page(struct page *page, unsigned long offset)
{
	/*
	 * If called with zero offset, we should release
	 * the private state associated with the page
	 */
	if (offset == 0)
		v9fs_fscache_invalidate_page(page);
}

static int v9fs_vfs_writepage_locked(struct page *page)
{
	char *buffer;
	int retval, len;
	loff_t offset, size;
	mm_segment_t old_fs;
	struct v9fs_inode *v9inode;
	struct inode *inode = page->mapping->host;

	v9inode = V9FS_I(inode);
	size = i_size_read(inode);
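	/*
	 * Only write back the bytes that lie within i_size: the last page
	 * of the file may extend beyond EOF.
	 */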
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	set_page_writeback(page);

	buffer = kmap(page);
	offset = page_offset(page);

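	/*
	 * v9fs_file_write_internal() takes a __user pointer, so temporarily
	 * widen the address limit to KERNEL_DS while handing it the
	 * kernel-mapped page buffer.
	 */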
	old_fs = get_fs();
	set_fs(get_ds());
	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	retval = v9fs_file_write_internal(inode,
					  v9inode->writeback_fid,
					  (__force const char __user *)buffer,
					  len, &offset, 0);
	if (retval > 0)
		retval = 0;

	set_fs(old_fs);
	kunmap(page);
	end_page_writeback(page);
	return retval;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = v9fs_vfs_writepage_locked(page);
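	/*
	 * Treat -EAGAIN as a transient failure: redirty the page so that
	 * writeback retries it later instead of flagging an error on the
	 * mapping.
	 */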
	if (retval < 0) {
		if (retval == -EAGAIN) {
			redirty_page_for_writepage(wbc, page);
			retval = 0;
		} else {
			SetPageError(page);
			mapping_set_error(page->mapping, retval);
		}
	} else
		retval = 0;

	unlock_page(page);
	return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 * @page: the page to be cleaned up
 *
 * Returns 0 on success.
 */

static int v9fs_launder_page(struct page *page)
{
	int retval;
	struct inode *inode = page->mapping->host;

	v9fs_fscache_wait_on_page_write(inode, page);
	if (clear_page_dirty_for_io(page)) {
		retval = v9fs_vfs_writepage_locked(page);
		if (retval)
			return retval;
	}
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows the open() O_DIRECT flag, which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode. Hence, when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 *
 */
static ssize_t
v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	       loff_t pos, unsigned long nr_segs)
{
	/*
	 * FIXME
	 * Now that we do caching with cache mode enabled, we need
	 * to support direct IO
	 */
	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) "
			"off/no(%lld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int retval = 0;
	struct page *page;
	struct v9fs_inode *v9inode;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = mapping->host;

	v9inode = V9FS_I(inode);
start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		retval = -ENOMEM;
		goto out;
	}
	BUG_ON(!v9inode->writeback_fid);
	if (PageUptodate(page))
		goto out;

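	/*
	 * A write that covers the whole page does not need the old
	 * contents, so skip the read-modify-write.
	 */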
	if (len == PAGE_CACHE_SIZE)
		goto out;

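	/*
	 * Partial-page write of a page that is not up to date: read the
	 * current contents first. v9fs_fid_readpage() unlocks the page and
	 * we drop our reference, so on success start over and re-grab it.
	 */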
	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
	page_cache_release(page);
	if (!retval)
		goto start;
out:
	*pagep = page;
	return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct inode *inode = page->mapping->host;

	if (unlikely(copied < len)) {
		/*
		 * zero out the rest of the area
		 */
		unsigned from = pos & (PAGE_CACHE_SIZE - 1);

		zero_user(page, from + copied, len - copied);
		flush_dcache_page(page);
	}

	if (!PageUptodate(page))
		SetPageUptodate(page);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
	}
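	/*
	 * Just mark the page dirty here; the data is pushed to the server
	 * later via ->writepage() using the inode's writeback fid.
	 */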
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

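/*
 * The address space operations table for 9p. It is installed on the
 * inode's mapping when inodes are set up; roughly (in vfs_inode.c):
 *
 *	inode->i_mapping->a_ops = &v9fs_addr_operations;
 */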
const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readpages = v9fs_vfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidatepage = v9fs_invalidate_page,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};