/*
 *  linux/fs/9p/vfs_file.c
 *
 * This file contains vfs file ops for 9P2000.
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"

static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;

/**
 * v9fs_file_open - open a file (or directory)
 * @inode: inode to be opened
 * @file: file being opened
 *
 */

int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		fid = v9fs_fid_clone(file_dentry(file));
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_client_clunk(fid);
			return err;
		}
		if ((file->f_flags & O_APPEND) &&
			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);
	}

	file->private_data = fid;
	mutex_lock(&v9inode->v_mutex);
	if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
	    !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * Clone a fid and add it to writeback_fid. We do this at
		 * open time instead of at page dirty time (via
		 * write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(file));
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);
	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
		v9fs_cache_inode_set_cookie(inode, file);
	return 0;
out_error:
	p9_client_clunk(file->private_data);
	file->private_data = NULL;
	return err;
}

/**
 * v9fs_file_lock - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * Bugs: this looks like a local only lock, we should extend into 9P
 *       by using open exclusive
 */

static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	int res = 0;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	return res;
}

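/**
 * v9fs_file_do_lock - set or clear a POSIX byte-range lock over 9P2000.L
 * @filp: file on which the lock is applied
 * @cmd: lock command (F_SETLK or F_SETLKW)
 * @fl: file lock structure
 *
 * The lock is taken locally first, then mirrored to the server with a
 * TLOCK request; a blocking request is retried while the server keeps
 * answering P9_LOCK_BLOCKED. On a server-side failure the local lock is
 * reverted so that both sides stay consistent.
 */
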
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status = P9_LOCK_ERROR;
	int res = 0;
	unsigned char fl_type;
	struct v9fs_session_info *v9ses;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
		BUG();

	res = locks_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	v9ses = v9fs_inode2v9ses(file_inode(filp));

	/*
	 * If it's a blocking request and we get P9_LOCK_BLOCKED as the
	 * status for the lock request, keep on trying.
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			goto out_unlock;

		if (status != P9_LOCK_BLOCKED)
			break;
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
				!= 0)
			break;
		/*
		 * p9_client_lock_dotl overwrites flock.client_id with the
		 * server message, free and reuse the client name
		 */
		if (flock.client_id != fid->clnt->name) {
			kfree(flock.client_id);
			flock.client_id = fid->clnt->name;
		}
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	default:
		WARN_ONCE(1, "unknown lock status code: %d\n", status);
		/* fall through */
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	}

out_unlock:
	/*
	 * In case the server returned an error for the lock request,
	 * revert it locally.
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		/* Even if this fails we want to return the remote error */
		locks_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
	if (flock.client_id != fid->clnt->name)
		kfree(flock.client_id);
out:
	return res;
}

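/**
 * v9fs_file_getlock - test for a conflicting lock (F_GETLK)
 * @filp: file to be checked
 * @fl: file lock structure to test and fill in
 *
 * A conflicting local lock is reported directly; otherwise the server is
 * queried with TGETLOCK and its reply is translated back into @fl.
 */
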
static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
	struct p9_getlock glock;
	struct p9_fid *fid;
	int res = 0;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	posix_test_lock(filp, fl);
	/*
	 * if we have a conflicting lock locally, no need to validate
	 * with server
	 */
	if (fl->fl_type != F_UNLCK)
		return res;

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type  = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
	else
		glock.length = fl->fl_end - fl->fl_start + 1;
	glock.proc_id = fl->fl_pid;
	glock.client_id = fid->clnt->name;

	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		goto out;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
		fl->fl_pid = -glock.proc_id;
	}
out:
	if (glock.client_id != fid->clnt->name)
		kfree(glock.client_id);
	return res;
}

/**
 * v9fs_file_lock_dotl - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else if (IS_GETLK(cmd))
		ret = v9fs_file_getlock(filp, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}

/**
 * v9fs_file_flock_dotl - lock a file
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_flock_dotl(struct file *filp, int cmd,
	struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if (!(fl->fl_flags & FL_FLOCK))
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}
	/* Convert flock to posix lock */
	fl->fl_flags |= FL_POSIX;
	fl->fl_flags ^= FL_FLOCK;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}

/**
 * v9fs_file_read_iter - read from a file
 * @iocb: kernel I/O control block describing the file and position
 * @to: destination iov_iter to read data into
 *
 */

static ssize_t
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct p9_fid *fid = iocb->ki_filp->private_data;
	int ret, err = 0;

	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
		 iov_iter_count(to), iocb->ki_pos);

	ret = p9_client_read(fid, iocb->ki_pos, to, &err);
	if (!ret)
		return err;

	iocb->ki_pos += ret;
	return ret;
}

/**
 * v9fs_file_write_iter - write to a file
 * @iocb: kernel I/O control block describing the file and position
 * @from: source iov_iter to write data from
 *
 */
static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval;
	loff_t origin;
	int err = 0;

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		return retval;

	origin = iocb->ki_pos;
	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
	if (retval > 0) {
		struct inode *inode = file_inode(file);
		loff_t i_size;
		unsigned long pg_start, pg_end;
		pg_start = origin >> PAGE_SHIFT;
		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		iocb->ki_pos += retval;
		i_size = i_size_read(inode);
		if (iocb->ki_pos > i_size) {
			inode_add_bytes(inode, iocb->ki_pos - i_size);
			i_size_write(inode, iocb->ki_pos);
		}
		return retval;
	}
	return err;
}

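/*
 * fsync for legacy 9P: the protocol has no explicit fsync request, so
 * after writing back the page cache a TWSTAT carrying an all-"don't
 * touch" stat (v9fs_blank_wstat) is sent, which a server may interpret
 * as a request to commit the file to stable storage. The datasync hint
 * cannot be expressed and is ignored.
 */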
static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
			   int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	struct p9_wstat wstat;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;
	v9fs_blank_wstat(&wstat);

	retval = p9_client_wstat(fid, &wstat);
	inode_unlock(inode);

	return retval;
}

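/*
 * fsync for 9P2000.L: the protocol has a dedicated TFSYNC request, so
 * the datasync hint is passed straight through to the server.
 */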
int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
			 int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;

	retval = p9_client_fsync(fid, datasync);
	inode_unlock(inode);

	return retval;
}

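/*
 * mmap for the cached (writeback) modes: use the generic page cache
 * mmap and install vm_ops that hook page_mkwrite.
 */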
static int
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_file_vm_ops;

	return retval;
}

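/*
 * mmap for cache=mmap mounts: make sure a writeback fid exists before a
 * writable mapping is set up, then fall back to the generic page cache
 * mmap with close and page_mkwrite hooks.
 */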
static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;
	struct inode *inode;
	struct v9fs_inode *v9inode;
	struct p9_fid *fid;

	inode = file_inode(filp);
	v9inode = V9FS_I(inode);
	mutex_lock(&v9inode->v_mutex);
	if (!v9inode->writeback_fid &&
	    (vma->vm_flags & VM_WRITE)) {
		/*
		 * Clone a fid and add it to writeback_fid. We do this at
		 * mmap time instead of at page dirty time (via
		 * write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(filp));
		if (IS_ERR(fid)) {
			retval = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			return retval;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_mmap_file_vm_ops;

	return retval;
}

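/*
 * Called when a page of a shared mapping is about to be made writable:
 * wait for fscache to finish storing the page, then return it locked
 * (VM_FAULT_LOCKED) so the caller can mark it dirty.
 */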
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct page *page = vmf->page;
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
		 page, (unsigned long)filp->private_data);

	/* Update file times before taking page lock */
	file_update_time(filp);

	v9inode = V9FS_I(inode);
	/* make sure the cache has finished storing the page */
	v9fs_fscache_wait_on_page_write(inode, page);
	BUG_ON(!v9inode->writeback_fid);
	lock_page(page);
	if (page->mapping != inode->i_mapping)
		goto out_unlock;
	wait_for_stable_page(page);

	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return VM_FAULT_NOPAGE;
}

/**
 * v9fs_mmap_file_read_iter - read from a file
 * @iocb: kernel I/O control block describing the file and position
 * @to: destination iov_iter to read data into
 *
 */
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* TODO: Check if there are dirty pages */
	return v9fs_file_read_iter(iocb, to);
}

/**
 * v9fs_mmap_file_write_iter - write to a file
 * @iocb: kernel I/O control block describing the file and position
 * @from: source iov_iter to write data from
 *
 */
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	/*
	 * TODO: invalidate mmaps on filp's inode between
	 * offset and offset+count
	 */
	return v9fs_file_write_iter(iocb, from);
}

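/*
 * VMA close hook for cache=mmap: synchronously write back the byte
 * range covered by the mapping before the VMA disappears.
 */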
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode;

	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = vma->vm_pgoff * PAGE_SIZE,
		 /* absolute end, byte at end included */
		.range_end = vma->vm_pgoff * PAGE_SIZE +
			(vma->vm_end - vma->vm_start - 1),
	};

	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);

	inode = file_inode(vma->vm_file);

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	sync_inode(inode, &wbc);
}

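/*
 * v9fs_file_vm_ops backs mappings in the cached modes; the mmap variant
 * additionally flushes the mapped range on VMA close.
 */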
static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};

static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};

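/*
 * Three families of file_operations, chosen according to the session's
 * cache mode and protocol version: the "cached" variants go through the
 * generic page cache for read/write, the plain variants talk to the
 * server for every read/write, and the "mmap" variants use the page
 * cache only for mappings. The _dotl versions additionally wire up
 * 9P2000.L locking and fsync.
 */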
const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.fsync = v9fs_file_fsync_dotl,
};