// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/9p/vfs_file.c
 *
 * This file contains vfs file ops for 9P2000.
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"

static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;

/**
 * v9fs_file_open - open a file (or directory)
 * @inode: inode to be opened
 * @file: file being opened
 *
 */

int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid, *writeback_fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		fid = v9fs_fid_clone(file_dentry(file));
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_client_clunk(fid);
			return err;
		}
		if ((file->f_flags & O_APPEND) &&
			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);
	}

	file->private_data = fid;
	mutex_lock(&v9inode->v_mutex);
	if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
	    !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * Clone a fid and add it to writeback_fid.
		 * We do this at open time instead of at page-dirty time
		 * (via write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		writeback_fid = v9fs_writeback_fid(file_dentry(file));
		if (IS_ERR(writeback_fid)) {
			err = PTR_ERR(writeback_fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) writeback_fid;
	}
	mutex_unlock(&v9inode->v_mutex);
	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
		v9fs_cache_inode_set_cookie(inode, file);
	v9fs_open_fid_add(inode, fid);
	return 0;
out_error:
	p9_client_clunk(file->private_data);
	file->private_data = NULL;
	return err;
}

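/*
 * The O_APPEND handling above matters only when the session speaks
 * neither 9P2000.u nor 9P2000.L: plain 9P2000 has no append open mode,
 * so the client seeks to end-of-file itself at open time.  A minimal
 * user-space sequence that exercises this open path might look like the
 * sketch below (the mount point /mnt/9p is only an assumption for the
 * example):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/9p/log.txt",
 *			      O_WRONLY | O_APPEND | O_CREAT, 0644);
 *
 *		if (fd < 0)
 *			return 1;
 *		// open(2) reaches v9fs_file_open(), which reuses or clones
 *		// a fid and issues the protocol open for this file
 *		if (write(fd, "hello\n", 6) != 6) {
 *			close(fd);
 *			return 1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */
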
/**
 * v9fs_file_lock - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * Bugs: this looks like a local only lock, we should extend into 9P
 *       by using open exclusive
 */

static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	int res = 0;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	return res;
}

static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status = P9_LOCK_ERROR;
	int res = 0;
	unsigned char fl_type;
	struct v9fs_session_info *v9ses;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
		BUG();

	res = locks_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	v9ses = v9fs_inode2v9ses(file_inode(filp));

	/*
	 * If it is a blocking request and we get P9_LOCK_BLOCKED as the
	 * status for the lock request, keep on trying.
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			goto out_unlock;

		if (status != P9_LOCK_BLOCKED)
			break;
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
				!= 0)
			break;
		/*
		 * p9_client_lock_dotl overwrites flock.client_id with the
		 * server message, free and reuse the client name
		 */
		if (flock.client_id != fid->clnt->name) {
			kfree(flock.client_id);
			flock.client_id = fid->clnt->name;
		}
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	default:
		WARN_ONCE(1, "unknown lock status code: %d\n", status);
		fallthrough;
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	}

out_unlock:
	/*
	 * In case the server returned an error for the lock request,
	 * revert it locally.
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		/* Even if this fails we want to return the remote error */
		locks_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
	if (flock.client_id != fid->clnt->name)
		kfree(flock.client_id);
out:
	return res;
}

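/*
 * The conversion above maps the inclusive POSIX byte range
 * [fl_start, fl_end] onto the start + length form used by the 9P2000.L
 * lock request, where a length of 0 means "to the end of the file".
 * As a rough standalone sketch of the same mapping (the struct and
 * helper names here are only illustrative):
 *
 *	struct p9_byte_range {
 *		unsigned long long start;
 *		unsigned long long length;
 *	};
 *
 *	static struct p9_byte_range posix_to_p9(loff_t fl_start, loff_t fl_end)
 *	{
 *		struct p9_byte_range r = { .start = fl_start, .length = 0 };
 *
 *		if (fl_end != OFFSET_MAX)	// OFFSET_MAX means lock to EOF
 *			r.length = fl_end - fl_start + 1;
 *		return r;
 *	}
 *
 * For example, bytes 0..4095 become start 0, length 4096, while a
 * whole-file lock (fl_end == OFFSET_MAX) becomes length 0.
 */
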
static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
	struct p9_getlock glock;
	struct p9_fid *fid;
	int res = 0;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	posix_test_lock(filp, fl);
	/*
	 * if we have a conflicting lock locally, no need to validate
	 * with server
	 */
	if (fl->fl_type != F_UNLCK)
		return res;

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type  = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
	else
		glock.length = fl->fl_end - fl->fl_start + 1;
	glock.proc_id = fl->fl_pid;
	glock.client_id = fid->clnt->name;

	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		goto out;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
		fl->fl_pid = -glock.proc_id;
	}
out:
	if (glock.client_id != fid->clnt->name)
		kfree(glock.client_id);
	return res;
}

/**
 * v9fs_file_lock_dotl - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else if (IS_GETLK(cmd))
		ret = v9fs_file_getlock(filp, fl);
	else
		ret = -EINVAL;
	return ret;
}

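/*
 * fcntl(2) byte-range locks on a 9P2000.L mount arrive here: F_SETLK and
 * F_SETLKW are forwarded through v9fs_file_do_lock() (with F_SETLKW
 * retrying while the server answers P9_LOCK_BLOCKED), and F_GETLK is
 * answered through v9fs_file_getlock().  A user-space sketch, assuming a
 * file on a hypothetical 9p mount at /mnt/9p/data:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct flock fl = {
 *			.l_type   = F_WRLCK,
 *			.l_whence = SEEK_SET,
 *			.l_start  = 0,
 *			.l_len    = 0,	// 0 means "to end of file"
 *		};
 *		int fd = open("/mnt/9p/data", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (fcntl(fd, F_SETLKW, &fl) < 0)	// may block on the server
 *			return 1;
 *		// ... work under the whole-file write lock ...
 *		fl.l_type = F_UNLCK;
 *		fcntl(fd, F_SETLK, &fl);
 *		close(fd);
 *		return 0;
 *	}
 */
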
/**
 * v9fs_file_flock_dotl - lock a file
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_flock_dotl(struct file *filp, int cmd,
	struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	if (!(fl->fl_flags & FL_FLOCK))
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}
	/* Convert flock to posix lock */
	fl->fl_flags |= FL_POSIX;
	fl->fl_flags ^= FL_FLOCK;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}

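/*
 * flock(2) requests arrive here with FL_FLOCK set; since the 9P2000.L
 * lock request only understands byte ranges, the flags are rewritten to
 * FL_POSIX and the whole-file range is sent through the same
 * v9fs_file_do_lock() path.  A user-space sketch, again assuming a
 * hypothetical /mnt/9p mount:
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/9p/data", O_RDWR);
 *
 *		if (fd < 0 || flock(fd, LOCK_EX) < 0)
 *			return 1;
 *		// exclusive whole-file lock held on the server
 *		flock(fd, LOCK_UN);
 *		close(fd);
 *		return 0;
 *	}
 */
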
/**
 * v9fs_file_read_iter - read from a file
 * @iocb: kiocb describing the read request
 * @to: iov_iter to copy the read data into
 *
 */

static ssize_t
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct p9_fid *fid = iocb->ki_filp->private_data;
	int ret, err = 0;

	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
		 iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_filp->f_flags & O_NONBLOCK)
		ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
	else
		ret = p9_client_read(fid, iocb->ki_pos, to, &err);
	if (!ret)
		return err;

	iocb->ki_pos += ret;
	return ret;
}

/**
 * v9fs_file_write_iter - write to a file
 * @iocb: kiocb describing the write request
 * @from: iov_iter to copy the write data from
 *
 */
static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval;
	loff_t origin;
	int err = 0;

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		return retval;

	origin = iocb->ki_pos;
	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
	if (retval > 0) {
		struct inode *inode = file_inode(file);
		loff_t i_size;
		unsigned long pg_start, pg_end;
		pg_start = origin >> PAGE_SHIFT;
		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		iocb->ki_pos += retval;
		i_size = i_size_read(inode);
		if (iocb->ki_pos > i_size) {
			inode_add_bytes(inode, iocb->ki_pos - i_size);
			/*
			 * Need to serialize against i_size_write() in
			 * v9fs_stat2inode()
			 */
			v9fs_i_size_write(inode, iocb->ki_pos);
		}
		return retval;
	}
	return err;
}

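/*
 * The invalidation above works in units of page-cache pages: the byte
 * range [origin, origin + retval - 1] that was just written is converted
 * to page indices with PAGE_SHIFT, and any cached pages in that range
 * are dropped so later cached reads refetch the server's copy.  A rough
 * worked example with 4 KiB pages (PAGE_SHIFT == 12):
 *
 *	loff_t origin = 8000;		// write started at byte 8000
 *	ssize_t retval = 300;		// 300 bytes written: 8000..8299
 *	unsigned long pg_start = origin >> PAGE_SHIFT;			// 1
 *	unsigned long pg_end = (origin + retval - 1) >> PAGE_SHIFT;	// 2
 *
 * so file pages 1 and 2 are invalidated and nothing outside the written
 * range is touched.
 */
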
static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
			   int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	struct p9_wstat wstat;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;
	v9fs_blank_wstat(&wstat);

	retval = p9_client_wstat(fid, &wstat);
	inode_unlock(inode);

	return retval;
}

int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
			 int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;

	retval = p9_client_fsync(fid, datasync);
	inode_unlock(inode);

	return retval;
}

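/*
 * For plain 9P2000 the flush is requested with a "blank" wstat: a Twstat
 * whose fields are all "don't touch" values asks the server to commit
 * the file to stable storage.  9P2000.L has an explicit fsync request,
 * so the datasync flag can be passed through.  Both are reached from
 * user space the usual way (the path below is only an assumption for the
 * example):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/9p/state", O_WRONLY | O_CREAT, 0644);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (write(fd, "checkpoint\n", 11) != 11) {
 *			close(fd);
 *			return 1;
 *		}
 *		fsync(fd);	// v9fs_file_fsync() or v9fs_file_fsync_dotl()
 *		close(fd);
 *		return 0;
 *	}
 */
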
static int
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;


	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_file_vm_ops;

	return retval;
}

static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;
	struct inode *inode;
	struct v9fs_inode *v9inode;
	struct p9_fid *fid;

	inode = file_inode(filp);
	v9inode = V9FS_I(inode);
	mutex_lock(&v9inode->v_mutex);
	if (!v9inode->writeback_fid &&
	    (vma->vm_flags & VM_SHARED) &&
	    (vma->vm_flags & VM_WRITE)) {
		/*
		 * Clone a fid and add it to writeback_fid.
		 * We do this at mmap time instead of at page-dirty time
		 * (via write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(filp));
		if (IS_ERR(fid)) {
			retval = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			return retval;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_mmap_file_vm_ops;

	return retval;
}

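/*
 * A shared writable mapping is what the writeback fid above exists for:
 * dirty pages can be written back long after the last file descriptor
 * (and even the name) is gone, so the fid to write through is pinned at
 * mmap time.  A user-space sketch for a mount using these mmap file
 * operations, with /mnt/9p/scratch being an assumed path:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/9p/scratch", O_RDWR | O_CREAT, 0644);
 *		char *p;
 *
 *		if (fd < 0 || ftruncate(fd, 4096) < 0)
 *			return 1;
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		close(fd);
 *		unlink("/mnt/9p/scratch");
 *		p[0] = 'x';		// faults into v9fs_vm_page_mkwrite()
 *		munmap(p, 4096);	// v9fs_mmap_vm_close() flushes the range
 *		return 0;
 *	}
 */
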
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct page *page = vmf->page;
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);


	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
		 page, (unsigned long)filp->private_data);

	/* Update file times before taking page lock */
	file_update_time(filp);

	v9inode = V9FS_I(inode);
	/* make sure the cache has finished storing the page */
	v9fs_fscache_wait_on_page_write(inode, page);
	BUG_ON(!v9inode->writeback_fid);
	lock_page(page);
	if (page->mapping != inode->i_mapping)
		goto out_unlock;
	wait_for_stable_page(page);

	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return VM_FAULT_NOPAGE;
}

/**
 * v9fs_mmap_file_read_iter - read from a file
 * @iocb: kiocb describing the read request
 * @to: iov_iter to copy the read data into
 *
 */
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* TODO: Check if there are dirty pages */
	return v9fs_file_read_iter(iocb, to);
}

/**
 * v9fs_mmap_file_write_iter - write to a file
 * @iocb: kiocb describing the write request
 * @from: iov_iter to copy the write data from
 *
 */
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	/*
	 * TODO: invalidate mmaps on filp's inode between
	 * offset and offset+count
	 */
	return v9fs_file_write_iter(iocb, from);
}

static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode;

	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
		 /* absolute end, byte at end included */
		.range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
			(vma->vm_end - vma->vm_start - 1),
	};

	if (!(vma->vm_flags & VM_SHARED))
		return;

	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);

	inode = file_inode(vma->vm_file);
	filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
}

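/*
 * The writeback range above is exactly the byte span covered by the VMA:
 * vm_pgoff is the file offset of the first mapped page (in pages), so
 * range_start is that offset in bytes and range_end is the last mapped
 * byte.  For example, a two-page mapping of the third file page
 * (vm_pgoff == 2, vm_end - vm_start == 8192, 4 KiB pages) flushes:
 *
 *	loff_t range_start = 2 * PAGE_SIZE;		// 8192
 *	loff_t range_end = range_start + (8192 - 1);	// 16383
 *
 * i.e. file bytes 8192..16383 and nothing outside the mapping.
 */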

static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};

static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};


const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

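/*
 * Six file_operations tables are defined because the choice is
 * two-dimensional: the protocol variant (9P2000/9P2000.u vs 9P2000.L,
 * which changes the lock, flock and fsync handlers) and the mount's
 * cache mode (no caching, cache=mmap, or cache=loose/fscache, which
 * changes the read, write and mmap paths).  The inode setup code
 * elsewhere in fs/9p picks one table per regular file; conceptually the
 * selection looks roughly like the sketch below (the helper name is only
 * illustrative):
 *
 *	static const struct file_operations *
 *	v9fs_pick_file_ops(struct v9fs_session_info *v9ses)
 *	{
 *		bool dotl = v9fs_proto_dotl(v9ses);
 *
 *		if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
 *			return dotl ? &v9fs_cached_file_operations_dotl
 *				    : &v9fs_cached_file_operations;
 *		if (v9ses->cache == CACHE_MMAP)
 *			return dotl ? &v9fs_mmap_file_operations_dotl
 *				    : &v9fs_mmap_file_operations;
 *		return dotl ? &v9fs_file_operations_dotl
 *			    : &v9fs_file_operations;
 *	}
 */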