xref: /openbmc/linux/fs/9p/vfs_file.c (revision 77d84ff8)
/*
 *  linux/fs/9p/vfs_file.c
 *
 * This file contains VFS file operations for 9P2000.
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <linux/idr.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"

static const struct vm_operations_struct v9fs_file_vm_ops;

/**
 * v9fs_file_open - open a file (or directory)
 * @inode: inode to be opened
 * @file: file being opened
 *
 */

int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		fid = v9fs_fid_clone(file->f_path.dentry);
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_client_clunk(fid);
			return err;
		}
		if ((file->f_flags & O_APPEND) &&
			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);
	}

	file->private_data = fid;
	mutex_lock(&v9inode->v_mutex);
	if (v9ses->cache && !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * Clone a fid and add it to writeback_fid. We do this at
		 * open time instead of at page-dirty time (via
		 * write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		fid = v9fs_writeback_fid(file->f_path.dentry);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);
	if (v9ses->cache)
		v9fs_cache_inode_set_cookie(inode, file);
	return 0;
out_error:
	p9_client_clunk(file->private_data);
	file->private_data = NULL;
	return err;
}

/**
 * v9fs_file_lock - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * Bugs: this looks like a local-only lock; we should extend it into 9P
 *       by using open exclusive
 */

static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	int res = 0;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	return res;
}

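/**
 * v9fs_file_do_lock - set a posix-style byte-range lock over 9P2000.L
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * Takes the lock locally first, then translates it into a TLOCK request
 * for the server, retrying while the server reports P9_LOCK_BLOCKED on a
 * blocking (SETLKW) request.  If the server fails the request, the local
 * lock is reverted.
 */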
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status;
	int res = 0;
	unsigned char fl_type;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
		BUG();

	res = posix_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	/*
	 * If it is a blocking request and we get P9_LOCK_BLOCKED as the
	 * status for the lock request, keep on trying.
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			break;

		if (status != P9_LOCK_BLOCKED)
			break;
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
		if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
			break;
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	default:
		BUG();
	}

	/*
	 * In case the server returned an error for the lock request,
	 * revert it locally.
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		res = posix_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
out:
	return res;
}

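/**
 * v9fs_file_getlock - test for a conflicting lock (F_GETLK) over 9P2000.L
 * @filp: file to be tested
 * @fl: file lock structure to fill in
 *
 * Checks for a conflicting local lock first; only if none is found is a
 * TGETLOCK request sent to the server, and the reply is mapped back
 * into @fl.
 */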
static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
	struct p9_getlock glock;
	struct p9_fid *fid;
	int res = 0;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	posix_test_lock(filp, fl);
	/*
	 * If we have a conflicting lock locally, there is no need to
	 * validate it with the server.
	 */
	if (fl->fl_type != F_UNLCK)
		return res;

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type  = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
	else
		glock.length = fl->fl_end - fl->fl_start + 1;
	glock.proc_id = fl->fl_pid;
	glock.client_id = fid->clnt->name;

	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		return res;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
		fl->fl_pid = glock.proc_id;
	}
	return res;
}

/**
 * v9fs_file_lock_dotl - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
		 filp, cmd, fl, filp->f_path.dentry->d_name.name);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else if (IS_GETLK(cmd))
		ret = v9fs_file_getlock(filp, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}

/**
 * v9fs_file_flock_dotl - lock a file
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_flock_dotl(struct file *filp, int cmd,
	struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
		 filp, cmd, fl, filp->f_path.dentry->d_name.name);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if (!(fl->fl_flags & FL_FLOCK))
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}
	/* Convert flock to posix lock */
	fl->fl_owner = (fl_owner_t)filp;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_flags |= FL_POSIX;
	fl->fl_flags ^= FL_FLOCK;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}

/**
 * v9fs_fid_readn - read from a fid
 * @fid: fid to read
 * @data: data buffer to read data into
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
ssize_t
v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
	       u64 offset)
{
	int n, total, size;

	p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
		 fid->fid, (long long unsigned)offset, count);
	n = 0;
	total = 0;
	size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
	do {
		n = p9_client_read(fid, data, udata, offset, count);
		if (n <= 0)
			break;

		if (data)
			data += n;
		if (udata)
			udata += n;

		offset += n;
		count -= n;
		total += n;
	} while (count > 0 && n == size);

	if (n < 0)
		total = n;

	return total;
}

/**
 * v9fs_file_readn - read from a file
 * @filp: file pointer to read
 * @data: data buffer to read data into
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
ssize_t
v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
	       u64 offset)
{
	return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
}

/**
 * v9fs_file_read - read from a file
 * @filp: file pointer to read
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */

static ssize_t
v9fs_file_read(struct file *filp, char __user *udata, size_t count,
	       loff_t *offset)
{
	int ret;
	struct p9_fid *fid;
	size_t size;

	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
	fid = filp->private_data;

	size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
	if (count > size)
		ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
	else
		ret = p9_client_read(fid, NULL, udata, *offset, count);

	if (ret > 0)
		*offset += ret;

	return ret;
}

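/**
 * v9fs_file_write_internal - write a user buffer out through a fid
 * @inode: inode of the file being written
 * @fid: fid to write through
 * @data: user data buffer to write from
 * @count: number of bytes to write
 * @offset: offset at which to write data
 * @invalidate: invalidate the written range in the page cache and update
 *              the inode size on a successful write
 */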
ssize_t
v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
			 const char __user *data, size_t count,
			 loff_t *offset, int invalidate)
{
	int n;
	loff_t i_size;
	size_t total = 0;
	struct p9_client *clnt;
	loff_t origin = *offset;
	unsigned long pg_start, pg_end;

	p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
		 data, (int)count, (int)*offset);

	clnt = fid->clnt;
	do {
		n = p9_client_write(fid, NULL, data+total, origin+total, count);
		if (n <= 0)
			break;
		count -= n;
		total += n;
	} while (count > 0);

	if (invalidate && (total > 0)) {
		pg_start = origin >> PAGE_CACHE_SHIFT;
		pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		*offset += total;
		i_size = i_size_read(inode);
		if (*offset > i_size) {
			inode_add_bytes(inode, *offset - i_size);
			i_size_write(inode, *offset);
		}
	}
	if (n < 0)
		return n;

	return total;
}

/**
 * v9fs_file_write - write to a file
 * @filp: file pointer to write
 * @data: data buffer to write data from
 * @count: size of buffer
 * @offset: offset at which to write data
 *
 */
static ssize_t
v9fs_file_write(struct file *filp, const char __user *data,
		size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	loff_t origin = *offset;

	retval = generic_write_checks(filp, &origin, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = v9fs_file_write_internal(file_inode(filp),
					filp->private_data,
					data, count, &origin, 1);
	/* update offset on successful write */
	if (retval > 0)
		*offset = origin;
out:
	return retval;
}


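/**
 * v9fs_file_fsync - flush dirty data for a legacy 9P file
 * @filp: file to be synced
 * @start: start of the byte range to flush
 * @end: end of the byte range to flush
 * @datasync: only used for debug output here
 *
 * Writes back the dirty page-cache range, then issues a wstat with a
 * blank (all "don't touch") stat structure, which the server may
 * interpret as a request to commit the file to stable storage.
 */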
static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
			   int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	struct p9_wstat wstat;
	int retval;

	retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (retval)
		return retval;

	mutex_lock(&inode->i_mutex);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;
	v9fs_blank_wstat(&wstat);

	retval = p9_client_wstat(fid, &wstat);
	mutex_unlock(&inode->i_mutex);

	return retval;
}

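/**
 * v9fs_file_fsync_dotl - flush dirty data for a 9P2000.L file
 * @filp: file to be synced
 * @start: start of the byte range to flush
 * @end: end of the byte range to flush
 * @datasync: if non-zero, only the data needs to be flushed
 *
 * Writes back the dirty page-cache range and then sends a TFSYNC
 * request to the server.
 */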
int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
			 int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	int retval;

	retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (retval)
		return retval;

	mutex_lock(&inode->i_mutex);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;

	retval = p9_client_fsync(fid, datasync);
	mutex_unlock(&inode->i_mutex);

	return retval;
}

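/**
 * v9fs_file_mmap - memory-map a file
 * @file: file to be mapped
 * @vma: virtual memory area describing the mapping
 *
 * Uses generic_file_mmap() and then installs v9fs_file_vm_ops so that
 * write faults go through v9fs_vm_page_mkwrite().
 */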
static int
v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int retval;

	retval = generic_file_mmap(file, vma);
	if (!retval)
		vma->vm_ops = &v9fs_file_vm_ops;

	return retval;
}

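/**
 * v9fs_vm_page_mkwrite - make a shared mmap'ed page writable
 * @vma: virtual memory area containing the page
 * @vmf: fault information for the page
 *
 * Waits for any pending fscache write on the page, then locks it and
 * checks that it still belongs to this mapping before the write is
 * allowed to proceed; dirty data is written back later through the
 * inode's writeback_fid.
 */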
static int
v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct page *page = vmf->page;
	struct file *filp = vma->vm_file;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
		 page, (unsigned long)filp->private_data);

	/* Update file times before taking page lock */
	file_update_time(filp);

	v9inode = V9FS_I(inode);
	/* make sure the cache has finished storing the page */
	v9fs_fscache_wait_on_page_write(inode, page);
	BUG_ON(!v9inode->writeback_fid);
	lock_page(page);
	if (page->mapping != inode->i_mapping)
		goto out_unlock;
	wait_for_stable_page(page);

	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return VM_FAULT_NOPAGE;
}

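/**
 * v9fs_direct_read - O_DIRECT read path for cached mode
 * @filp: file pointer to read
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offsetp: offset at which to read data
 *
 * Flushes any dirty cached pages in the requested range before reading
 * directly from the server.
 */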
static ssize_t
v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
		 loff_t *offsetp)
{
	loff_t size, offset;
	struct inode *inode;
	struct address_space *mapping;

	offset = *offsetp;
	mapping = filp->f_mapping;
	inode = mapping->host;
	if (!count)
		return 0;
	size = i_size_read(inode);
	if (offset < size)
		filemap_write_and_wait_range(mapping, offset,
					     offset + count - 1);

	return v9fs_file_read(filp, udata, count, offsetp);
}

/**
 * v9fs_cached_file_read - read from a file
 * @filp: file pointer to read
 * @data: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
static ssize_t
v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
		      loff_t *offset)
{
	if (filp->f_flags & O_DIRECT)
		return v9fs_direct_read(filp, data, count, offset);
	return do_sync_read(filp, data, count, offset);
}

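/**
 * v9fs_direct_write - O_DIRECT write path for cached mode
 * @filp: file pointer to write
 * @data: data buffer to write data from
 * @count: size of buffer
 * @offsetp: offset at which to write data
 *
 * Writes back and invalidates the cached pages covering the range
 * before writing through to the server; falls back to a buffered
 * write if the pages cannot be invalidated.
 */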
static ssize_t
v9fs_direct_write(struct file *filp, const char __user *data,
		  size_t count, loff_t *offsetp)
{
	loff_t offset;
	ssize_t retval;
	struct inode *inode;
	struct address_space *mapping;

	offset = *offsetp;
	mapping = filp->f_mapping;
	inode = mapping->host;
	if (!count)
		return 0;

	mutex_lock(&inode->i_mutex);
	retval = filemap_write_and_wait_range(mapping, offset,
					      offset + count - 1);
	if (retval)
		goto err_out;
	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate the clean cached pages from the region
	 * we're about to write.  We do this *before* the write so that if we
	 * fail here we fall back to a buffered write.
	 */
	if (mapping->nrpages) {
		pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
		pgoff_t pg_end   = (offset + count - 1) >> PAGE_CACHE_SHIFT;

		retval = invalidate_inode_pages2_range(mapping,
							pg_start, pg_end);
		/*
		 * If a page cannot be invalidated, fall back
		 * to a buffered write.
		 */
		if (retval) {
			if (retval == -EBUSY)
				goto buff_write;
			goto err_out;
		}
	}
	retval = v9fs_file_write(filp, data, count, offsetp);
err_out:
	mutex_unlock(&inode->i_mutex);
	return retval;

buff_write:
	mutex_unlock(&inode->i_mutex);
	return do_sync_write(filp, data, count, offsetp);
}

/**
 * v9fs_cached_file_write - write to a file
 * @filp: file pointer to write
 * @data: data buffer to write data from
 * @count: size of buffer
 * @offset: offset at which to write data
 *
 */
static ssize_t
v9fs_cached_file_write(struct file *filp, const char __user *data,
		       size_t count, loff_t *offset)
{
	if (filp->f_flags & O_DIRECT)
		return v9fs_direct_write(filp, data, count, offset);
	return do_sync_write(filp, data, count, offset);
}

static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = v9fs_vm_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};


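/*
 * Four sets of file operations are exported: cached and uncached
 * variants for both legacy 9P2000(.u) and 9P2000.L.  The cached
 * variants go through the page cache (with O_DIRECT handled
 * explicitly above), while the uncached ones read and write through
 * the server on every call.
 */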
const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read = v9fs_cached_file_read,
	.write = v9fs_cached_file_write,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read = v9fs_cached_file_read,
	.write = v9fs_cached_file_write,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read = v9fs_file_read,
	.write = v9fs_file_write,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read = v9fs_file_read,
	.write = v9fs_file_write,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.fsync = v9fs_file_fsync_dotl,
};