xref: /openbmc/linux/fs/9p/vfs_file.c (revision 0a94608f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs file ops for 9P2000.
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"

static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;

/**
 * v9fs_file_open - open a file (or directory)
 * @inode: inode to be opened
 * @file: file being opened
 *
 */

int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid, *writeback_fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
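	/*
	 * 9p2000.L understands Linux-style open flags directly; for legacy
	 * 9p2000(.u) they have to be folded down into the protocol's small
	 * set of omode bits.
	 */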
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		fid = v9fs_fid_clone(file_dentry(file));
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_client_clunk(fid);
			return err;
		}
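		/*
		 * Plain 9p2000 (without the .u or .L extensions) has no
		 * O_APPEND semantics on the wire, so approximate append
		 * mode by seeking to EOF at open time.
		 */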
		if ((file->f_flags & O_APPEND) &&
			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);
	}

	file->private_data = fid;
	mutex_lock(&v9inode->v_mutex);
	if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
	    !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * Clone a fid and store it in writeback_fid.  We do this at
		 * open time rather than at page-dirty time (via
		 * write_begin/page_mkwrite) so that the write-after-unlink
		 * use case keeps working.
		 */
		writeback_fid = v9fs_writeback_fid(file_dentry(file));
		if (IS_ERR(writeback_fid)) {
			err = PTR_ERR(writeback_fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) writeback_fid;
	}
	mutex_unlock(&v9inode->v_mutex);
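	/*
	 * In the loose/fscache cache modes, pin the inode's fscache cookie
	 * for the lifetime of this open; the FMODE_WRITE bit tells fscache
	 * that the cached data may be modified.
	 */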
	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
		fscache_use_cookie(v9fs_inode_cookie(v9inode),
				   file->f_mode & FMODE_WRITE);
	v9fs_open_fid_add(inode, fid);
	return 0;
out_error:
	p9_client_clunk(file->private_data);
	file->private_data = NULL;
	return err;
}

/**
 * v9fs_file_lock - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * Bugs: this looks like a local-only lock; we should extend it into 9P
 *       by using open exclusive
 */

static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);

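	/*
	 * No lock is actually taken here: legacy 9p has no byte-range lock
	 * operation (see the Bugs note above), so a SETLK/SETLKW that takes
	 * a lock is simply acknowledged after writing back and invalidating
	 * cached pages.
	 */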
	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	return 0;
}

static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status = P9_LOCK_ERROR;
	int res = 0;
	unsigned char fl_type;
	struct v9fs_session_info *v9ses;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	BUG_ON((fl->fl_flags & FL_POSIX) != FL_POSIX);

	res = locks_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
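	/*
	 * 9p encodes "lock to end of file" as length 0, whereas POSIX uses
	 * fl_end == OFFSET_MAX; otherwise the length is the inclusive span
	 * fl_end - fl_start + 1.
	 */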
	flock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	v9ses = v9fs_inode2v9ses(file_inode(filp));

	/*
	 * If this is a blocking request and the server returns
	 * P9_LOCK_BLOCKED for the lock, keep retrying.
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			goto out_unlock;

		if (status != P9_LOCK_BLOCKED)
			break;
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
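		/*
		 * The lock is contended on the server; blocking is emulated
		 * by sleeping for the session's lock polling interval and
		 * retrying.  A pending signal (non-zero return) aborts the
		 * wait.
		 */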
		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
				!= 0)
			break;
		/*
		 * p9_client_lock_dotl overwrites flock.client_id with the
		 * server message, free and reuse the client name
		 */
		if (flock.client_id != fid->clnt->name) {
			kfree(flock.client_id);
			flock.client_id = fid->clnt->name;
		}
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	default:
		WARN_ONCE(1, "unknown lock status code: %d\n", status);
		fallthrough;
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	}

out_unlock:
	/*
	 * In case the server returned an error for the lock request,
	 * revert it locally.
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		/* Even if this fails we want to return the remote error */
		locks_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
	if (flock.client_id != fid->clnt->name)
		kfree(flock.client_id);
out:
	return res;
}

static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
	struct p9_getlock glock;
	struct p9_fid *fid;
	int res = 0;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	posix_test_lock(filp, fl);
	/*
	 * if we have a conflicting lock locally, no need to validate
	 * with server
	 */
	if (fl->fl_type != F_UNLCK)
		return res;

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type  = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
	else
		glock.length = fl->fl_end - fl->fl_start + 1;
	glock.proc_id = fl->fl_pid;
	glock.client_id = fid->clnt->name;

	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		goto out;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
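		/*
		 * The owner pid reported by the server belongs to the
		 * server's pid namespace, so return it negated to make
		 * clear it is not a locally meaningful pid.
		 */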
		fl->fl_pid = -glock.proc_id;
	}
out:
	if (glock.client_id != fid->clnt->name)
		kfree(glock.client_id);
	return res;
}

/**
 * v9fs_file_lock_dotl - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else if (IS_GETLK(cmd))
		ret = v9fs_file_getlock(filp, fl);
	else
		ret = -EINVAL;
	return ret;
}
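
/*
 * For reference (not part of the code path itself): a userspace
 * fcntl(fd, F_SETLKW, &fl) with fl.l_type == F_WRLCK on a 9p2000.L mount
 * arrives here with IS_SETLKW(cmd) true, and v9fs_file_do_lock() sends it
 * to the server as a lock request of type P9_LOCK_TYPE_WRLCK, polling
 * while the server keeps reporting P9_LOCK_BLOCKED.
 */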

/**
 * v9fs_file_flock_dotl - lock a file
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_flock_dotl(struct file *filp, int cmd,
	struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	if (!(fl->fl_flags & FL_FLOCK))
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}
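	/*
	 * 9p2000.L only provides a POSIX-style byte-range lock RPC, so a
	 * BSD flock request is translated into an equivalent whole-file
	 * POSIX lock before being sent to the server.
	 */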
	/* Convert flock to posix lock */
	fl->fl_flags |= FL_POSIX;
	fl->fl_flags ^= FL_FLOCK;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}

/**
 * v9fs_file_read_iter - read from a file
 * @iocb: The operation parameters
 * @to: The buffer to read into
 *
 */
static ssize_t
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct p9_fid *fid = iocb->ki_filp->private_data;
	int ret, err = 0;

	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
		 iov_iter_count(to), iocb->ki_pos);

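	/*
	 * For O_NONBLOCK, issue a single 9p read request instead of looping
	 * until the iov is filled, so the call cannot stall waiting for
	 * more data from the server.
	 */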
	if (iocb->ki_filp->f_flags & O_NONBLOCK)
		ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
	else
		ret = p9_client_read(fid, iocb->ki_pos, to, &err);
	if (!ret)
		return err;

	iocb->ki_pos += ret;
	return ret;
}

/**
 * v9fs_file_write_iter - write to a file
 * @iocb: The operation parameters
 * @from: The data to write
 *
 */
static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval;
	loff_t origin;
	int err = 0;

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		return retval;

	origin = iocb->ki_pos;
	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
	if (retval > 0) {
		struct inode *inode = file_inode(file);
		loff_t i_size;
		unsigned long pg_start, pg_end;

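		/*
		 * The write went straight to the server, so any page-cache
		 * pages covering the written range are now stale; drop them.
		 */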
		pg_start = origin >> PAGE_SHIFT;
		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		iocb->ki_pos += retval;
		i_size = i_size_read(inode);
		if (iocb->ki_pos > i_size) {
			inode_add_bytes(inode, iocb->ki_pos - i_size);
			/*
			 * Need to serialize against i_size_write() in
			 * v9fs_stat2inode()
			 */
			v9fs_i_size_write(inode, iocb->ki_pos);
		}
		return retval;
	}
	return err;
}

static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
			   int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	struct p9_wstat wstat;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;
	v9fs_blank_wstat(&wstat);

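	/*
	 * Legacy 9p has no explicit fsync operation; a TWSTAT whose fields
	 * are all "don't touch" values is the conventional request for the
	 * server to commit the file's contents to stable storage.
	 */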
	retval = p9_client_wstat(fid, &wstat);
	inode_unlock(inode);

	return retval;
}

int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
			 int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;

	retval = p9_client_fsync(fid, datasync);
	inode_unlock(inode);

	return retval;
}

static int
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_file_vm_ops;

	return retval;
}

static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;
	struct inode *inode;
	struct v9fs_inode *v9inode;
	struct p9_fid *fid;

	inode = file_inode(filp);
	v9inode = V9FS_I(inode);
	mutex_lock(&v9inode->v_mutex);
	if (!v9inode->writeback_fid &&
	    (vma->vm_flags & VM_SHARED) &&
	    (vma->vm_flags & VM_WRITE)) {
		/*
		 * Clone a fid and store it in writeback_fid.  We do this at
		 * mmap time rather than at page-dirty time (via
		 * write_begin/page_mkwrite) so that the write-after-unlink
		 * use case keeps working.
		 */
		fid = v9fs_writeback_fid(file_dentry(filp));
		if (IS_ERR(fid)) {
			retval = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			return retval;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_mmap_file_vm_ops;

	return retval;
}

static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct folio *folio = page_folio(vmf->page);
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
		 folio, (unsigned long)filp->private_data);

	v9inode = V9FS_I(inode);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		return VM_FAULT_NOPAGE;
#endif

	/* Update file times before taking page lock */
	file_update_time(filp);

	BUG_ON(!v9inode->writeback_fid);
	if (folio_lock_killable(folio) < 0)
		return VM_FAULT_RETRY;
	if (folio_mapping(folio) != inode->i_mapping)
		goto out_unlock;
	folio_wait_stable(folio);

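	/*
	 * Returning VM_FAULT_LOCKED hands the still-locked folio back to the
	 * fault handler, which marks it dirty; it is written back later
	 * through the writeback_fid set up at open/mmap time.
	 */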
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return VM_FAULT_NOPAGE;
}

/**
 * v9fs_mmap_file_read_iter - read from a file
 * @iocb: The operation parameters
 * @to: The buffer to read into
 *
 */
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* TODO: Check if there are dirty pages */
	return v9fs_file_read_iter(iocb, to);
}

/**
 * v9fs_mmap_file_write_iter - write to a file
 * @iocb: The operation parameters
 * @from: The data to write
 *
 */
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	/*
	 * TODO: invalidate mmaps on filp's inode between
	 * offset and offset+count
	 */
	return v9fs_file_write_iter(iocb, from);
}

static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode;

	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
		 /* absolute end, byte at end included */
		.range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
			(vma->vm_end - vma->vm_start - 1),
	};

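	/*
	 * Only a shared writable mapping can have dirtied the page cache, so
	 * flush exactly the byte range this VMA covered (synchronously, via
	 * WB_SYNC_ALL) before the mapping goes away.
	 */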
	if (!(vma->vm_flags & VM_SHARED))
		return;

	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);

	inode = file_inode(vma->vm_file);
	filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
}

static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};

static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
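
/*
 * Note: v9fs_file_vm_ops is installed by v9fs_file_mmap() (used by the
 * cached file_operations below); v9fs_mmap_file_vm_ops is installed by
 * v9fs_mmap_file_mmap() and additionally flushes the mapped range
 * synchronously when the VMA is closed (see v9fs_mmap_vm_close above).
 */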

const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};