xref: /openbmc/linux/fs/9p/vfs_file.c (revision d9bc0d11)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file contains vfs file ops for 9P2000.
4  *
5  *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
6  *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/errno.h>
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/sched.h>
14 #include <linux/file.h>
15 #include <linux/stat.h>
16 #include <linux/string.h>
17 #include <linux/inet.h>
18 #include <linux/list.h>
19 #include <linux/pagemap.h>
20 #include <linux/utsname.h>
21 #include <linux/uaccess.h>
22 #include <linux/uio.h>
23 #include <linux/slab.h>
24 #include <net/9p/9p.h>
25 #include <net/9p/client.h>
26 
27 #include "v9fs.h"
28 #include "v9fs_vfs.h"
29 #include "fid.h"
30 #include "cache.h"
31 
32 static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
33 
34 /**
35  * v9fs_file_open - open a file (or directory)
36  * @inode: inode to be opened
37  * @file: file being opened
38  *
39  */
40 
41 int v9fs_file_open(struct inode *inode, struct file *file)
42 {
43 	int err;
44 	struct v9fs_inode *v9inode;
45 	struct v9fs_session_info *v9ses;
46 	struct p9_fid *fid, *writeback_fid;
47 	int omode;
48 
49 	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
50 	v9inode = V9FS_I(inode);
51 	v9ses = v9fs_inode2v9ses(inode);
	/* Translate the VFS open flags into the on-the-wire open mode for
	 * the protocol variant in use (9P2000.L vs legacy/9P2000.u). */
52 	if (v9fs_proto_dotl(v9ses))
53 		omode = v9fs_open_to_dotl_flags(file->f_flags);
54 	else
55 		omode = v9fs_uflags2omode(file->f_flags,
56 					v9fs_proto_dotu(v9ses));
	/* No fid attached yet (e.g. not opened via atomic_open): clone one
	 * from the dentry and open it on the server ourselves. */
57 	fid = file->private_data;
58 	if (!fid) {
59 		fid = v9fs_fid_clone(file_dentry(file));
60 		if (IS_ERR(fid))
61 			return PTR_ERR(fid);
62 
63 		err = p9_client_open(fid, omode);
64 		if (err < 0) {
65 			p9_fid_put(fid);
66 			return err;
67 		}
		/* Plain 9P (neither .u nor .L) cannot express O_APPEND;
		 * approximate it by seeking to EOF at open time. */
68 		if ((file->f_flags & O_APPEND) &&
69 			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
70 			generic_file_llseek(file, 0, SEEK_END);
71 
72 		file->private_data = fid;
73 	}
74 
	/* Writable open under a writeback cache: make sure the inode owns a
	 * writeback fid.  v_mutex serializes the check-and-create. */
75 	mutex_lock(&v9inode->v_mutex);
76 	if ((v9ses->cache >= CACHE_WRITEBACK) && !v9inode->writeback_fid &&
77 	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
78 		/*
79 		 * clone a fid and add it to writeback_fid
80 		 * we do it during open time instead of
81 		 * page dirty time via write_begin/page_mkwrite
82 		 * because we want write after unlink usecase
83 		 * to work.
84 		 */
85 		writeback_fid = v9fs_writeback_fid(file_dentry(file));
86 		if (IS_ERR(writeback_fid)) {
87 			err = PTR_ERR(writeback_fid);
88 			mutex_unlock(&v9inode->v_mutex);
89 			goto out_error;
90 		}
91 		v9inode->writeback_fid = (void *) writeback_fid;
92 	}
93 	mutex_unlock(&v9inode->v_mutex);
94 #ifdef CONFIG_9P_FSCACHE
95 	if (v9ses->cache == CACHE_FSCACHE)
96 		fscache_use_cookie(v9fs_inode_cookie(v9inode),
97 				   file->f_mode & FMODE_WRITE);
98 #endif
99 	v9fs_open_fid_add(inode, &fid);
100 	return 0;
101 out_error:
	/* Drop the fid we (or atomic_open) attached; the file is not open. */
102 	p9_fid_put(file->private_data);
103 	file->private_data = NULL;
104 	return err;
105 }
106 
107 /**
108  * v9fs_file_lock - lock a file (or directory)
109  * @filp: file to be locked
110  * @cmd: lock command
111  * @fl: file lock structure
112  *
113  * Bugs: this looks like a local only lock, we should extend into 9P
114  *       by using open exclusive
115  */
116 
117 static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
118 {
119 	struct inode *inode = file_inode(filp);
120 
121 	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
122 
123 	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
124 		filemap_write_and_wait(inode->i_mapping);
125 		invalidate_mapping_pages(&inode->i_data, 0, -1);
126 	}
127 
128 	return 0;
129 }
130 
/*
 * v9fs_file_do_lock - take/release a POSIX lock locally, then mirror it on
 * the server via TLOCK.  If the server refuses the lock, the local lock is
 * reverted so local and remote state stay consistent.
 */
131 static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
132 {
133 	struct p9_flock flock;
134 	struct p9_fid *fid;
135 	uint8_t status = P9_LOCK_ERROR;
136 	int res = 0;
137 	unsigned char fl_type;
138 	struct v9fs_session_info *v9ses;
139 
140 	fid = filp->private_data;
141 	BUG_ON(fid == NULL);
142 
143 	BUG_ON((fl->fl_flags & FL_POSIX) != FL_POSIX);
144 
	/* Take the lock locally first; bail out on local conflict. */
145 	res = locks_lock_file_wait(filp, fl);
146 	if (res < 0)
147 		goto out;
148 
149 	/* convert posix lock to p9 tlock args */
150 	memset(&flock, 0, sizeof(flock));
151 	/* map the lock type */
152 	switch (fl->fl_type) {
153 	case F_RDLCK:
154 		flock.type = P9_LOCK_TYPE_RDLCK;
155 		break;
156 	case F_WRLCK:
157 		flock.type = P9_LOCK_TYPE_WRLCK;
158 		break;
159 	case F_UNLCK:
160 		flock.type = P9_LOCK_TYPE_UNLCK;
161 		break;
162 	}
163 	flock.start = fl->fl_start;
	/* On the wire, length 0 means "to end of file". */
164 	if (fl->fl_end == OFFSET_MAX)
165 		flock.length = 0;
166 	else
167 		flock.length = fl->fl_end - fl->fl_start + 1;
168 	flock.proc_id = fl->fl_pid;
169 	flock.client_id = fid->clnt->name;
170 	if (IS_SETLKW(cmd))
171 		flock.flags = P9_LOCK_FLAGS_BLOCK;
172 
173 	v9ses = v9fs_inode2v9ses(file_inode(filp));
174 
175 	/*
176 	 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
177 	 * for lock request, keep on trying
178 	 */
179 	for (;;) {
180 		res = p9_client_lock_dotl(fid, &flock, &status);
181 		if (res < 0)
182 			goto out_unlock;
183 
184 		if (status != P9_LOCK_BLOCKED)
185 			break;
		/* NOTE(review): status is necessarily P9_LOCK_BLOCKED here,
		 * so the first clause of this test is redundant; the point
		 * is that a non-blocking request gives up immediately. */
186 		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
187 			break;
		/* Sleep before retrying; a signal aborts the wait. */
188 		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
189 				!= 0)
190 			break;
191 		/*
192 		 * p9_client_lock_dotl overwrites flock.client_id with the
193 		 * server message, free and reuse the client name
194 		 */
195 		if (flock.client_id != fid->clnt->name) {
196 			kfree(flock.client_id);
197 			flock.client_id = fid->clnt->name;
198 		}
199 	}
200 
201 	/* map 9p status to VFS status */
202 	switch (status) {
203 	case P9_LOCK_SUCCESS:
204 		res = 0;
205 		break;
206 	case P9_LOCK_BLOCKED:
207 		res = -EAGAIN;
208 		break;
209 	default:
210 		WARN_ONCE(1, "unknown lock status code: %d\n", status);
211 		fallthrough;
212 	case P9_LOCK_ERROR:
213 	case P9_LOCK_GRACE:
214 		res = -ENOLCK;
215 		break;
216 	}
217 
218 out_unlock:
219 	/*
220 	 * incase server returned error for lock request, revert
221 	 * it locally
222 	 */
223 	if (res < 0 && fl->fl_type != F_UNLCK) {
224 		fl_type = fl->fl_type;
225 		fl->fl_type = F_UNLCK;
226 		/* Even if this fails we want to return the remote error */
227 		locks_lock_file_wait(filp, fl);
228 		fl->fl_type = fl_type;
229 	}
	/* Free a server-allocated client_id left behind by the last reply. */
230 	if (flock.client_id != fid->clnt->name)
231 		kfree(flock.client_id);
232 out:
233 	return res;
234 }
235 
/*
 * v9fs_file_getlock - test for a conflicting lock (F_GETLK), checking the
 * local lock table first and falling back to a TGETLOCK on the server.
 */
236 static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
237 {
238 	struct p9_getlock glock;
239 	struct p9_fid *fid;
240 	int res = 0;
241 
242 	fid = filp->private_data;
243 	BUG_ON(fid == NULL);
244 
245 	posix_test_lock(filp, fl);
246 	/*
247 	 * if we have a conflicting lock locally, no need to validate
248 	 * with server
249 	 */
250 	if (fl->fl_type != F_UNLCK)
251 		return res;
252 
253 	/* convert posix lock to p9 tgetlock args */
254 	memset(&glock, 0, sizeof(glock));
255 	glock.type  = P9_LOCK_TYPE_UNLCK;
256 	glock.start = fl->fl_start;
	/* On the wire, length 0 means "to end of file". */
257 	if (fl->fl_end == OFFSET_MAX)
258 		glock.length = 0;
259 	else
260 		glock.length = fl->fl_end - fl->fl_start + 1;
261 	glock.proc_id = fl->fl_pid;
262 	glock.client_id = fid->clnt->name;
263 
264 	res = p9_client_getlock_dotl(fid, &glock);
265 	if (res < 0)
266 		goto out;
267 	/* map 9p lock type to os lock type */
268 	switch (glock.type) {
269 	case P9_LOCK_TYPE_RDLCK:
270 		fl->fl_type = F_RDLCK;
271 		break;
272 	case P9_LOCK_TYPE_WRLCK:
273 		fl->fl_type = F_WRLCK;
274 		break;
275 	case P9_LOCK_TYPE_UNLCK:
276 		fl->fl_type = F_UNLCK;
277 		break;
278 	}
	/* Server reported a conflict: describe it back to the caller. */
279 	if (glock.type != P9_LOCK_TYPE_UNLCK) {
280 		fl->fl_start = glock.start;
281 		if (glock.length == 0)
282 			fl->fl_end = OFFSET_MAX;
283 		else
284 			fl->fl_end = glock.start + glock.length - 1;
		/* Negated pid: the owner is remote and its pid cannot be
		 * mapped to a local task (standard F_GETLK convention). */
285 		fl->fl_pid = -glock.proc_id;
286 	}
287 out:
	/* p9_client_getlock_dotl may have replaced client_id with a
	 * server-allocated string; free it if so. */
288 	if (glock.client_id != fid->clnt->name)
289 		kfree(glock.client_id);
290 	return res;
291 }
292 
293 /**
294  * v9fs_file_lock_dotl - lock a file (or directory)
295  * @filp: file to be locked
296  * @cmd: lock command
297  * @fl: file lock structure
298  *
299  */
300 
301 static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
302 {
303 	struct inode *inode = file_inode(filp);
304 	int ret = -ENOLCK;
305 
306 	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
307 		 filp, cmd, fl, filp);
308 
309 	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
310 		filemap_write_and_wait(inode->i_mapping);
311 		invalidate_mapping_pages(&inode->i_data, 0, -1);
312 	}
313 
314 	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
315 		ret = v9fs_file_do_lock(filp, cmd, fl);
316 	else if (IS_GETLK(cmd))
317 		ret = v9fs_file_getlock(filp, fl);
318 	else
319 		ret = -EINVAL;
320 	return ret;
321 }
322 
323 /**
324  * v9fs_file_flock_dotl - lock a file
325  * @filp: file to be locked
326  * @cmd: lock command
327  * @fl: file lock structure
328  *
329  */
330 
331 static int v9fs_file_flock_dotl(struct file *filp, int cmd,
332 	struct file_lock *fl)
333 {
334 	struct inode *inode = file_inode(filp);
335 	int ret = -ENOLCK;
336 
337 	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
338 		 filp, cmd, fl, filp);
339 
340 	if (!(fl->fl_flags & FL_FLOCK))
341 		goto out_err;
342 
343 	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
344 		filemap_write_and_wait(inode->i_mapping);
345 		invalidate_mapping_pages(&inode->i_data, 0, -1);
346 	}
347 	/* Convert flock to posix lock */
348 	fl->fl_flags |= FL_POSIX;
349 	fl->fl_flags ^= FL_FLOCK;
350 
351 	if (IS_SETLK(cmd) | IS_SETLKW(cmd))
352 		ret = v9fs_file_do_lock(filp, cmd, fl);
353 	else
354 		ret = -EINVAL;
355 out_err:
356 	return ret;
357 }
358 
359 /**
360  * v9fs_file_read_iter - read from a file
361  * @iocb: The operation parameters
362  * @to: The buffer to read into
363  *
364  */
365 static ssize_t
366 v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
367 {
368 	struct p9_fid *fid = iocb->ki_filp->private_data;
369 	int ret, err = 0;
370 	struct inode *inode = file_inode(iocb->ki_filp);
371 	struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
372 
373 	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
374 		 iov_iter_count(to), iocb->ki_pos);
375 
	/* Cache modes beyond mmap go through the page cache. */
376 	if (v9ses->cache > CACHE_MMAP)
377 		return generic_file_read_iter(iocb, to);
378 
	/* O_NONBLOCK: issue a single read RPC rather than looping until
	 * the iter is filled. */
379 	if (iocb->ki_filp->f_flags & O_NONBLOCK)
380 		ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
381 	else
382 		ret = p9_client_read(fid, iocb->ki_pos, to, &err);
	/* Nothing transferred: propagate err (presumably 0 at EOF — set by
	 * the client read helpers). */
383 	if (!ret)
384 		return err;
385 
386 	iocb->ki_pos += ret;
387 	return ret;
388 }
389 
390 /**
391  * v9fs_file_write_iter - write to a file
392  * @iocb: The operation parameters
393  * @from: The data to write
394  *
395  */
396 static ssize_t
397 v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
398 {
399 	struct file *file = iocb->ki_filp;
400 	ssize_t retval;
401 	loff_t origin;
402 	int err = 0;
403 	struct inode *inode = file_inode(iocb->ki_filp);
404 	struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
405 
406 	if (v9ses->cache >= CACHE_WRITEBACK)
407 		return generic_file_write_iter(iocb, from);
408 
409 	retval = generic_write_checks(iocb, from);
410 	if (retval <= 0)
411 		return retval;
412 
413 	origin = iocb->ki_pos;
414 	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
415 	if (retval > 0) {
416 		struct inode *inode = file_inode(file);
417 		loff_t i_size;
418 		unsigned long pg_start, pg_end;
419 
420 		pg_start = origin >> PAGE_SHIFT;
421 		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
422 		if (inode->i_mapping && inode->i_mapping->nrpages)
423 			invalidate_inode_pages2_range(inode->i_mapping,
424 						      pg_start, pg_end);
425 		iocb->ki_pos += retval;
426 		i_size = i_size_read(inode);
427 		if (iocb->ki_pos > i_size) {
428 			inode_add_bytes(inode, iocb->ki_pos - i_size);
429 			/*
430 			 * Need to serialize against i_size_write() in
431 			 * v9fs_stat2inode()
432 			 */
433 			v9fs_i_size_write(inode, iocb->ki_pos);
434 		}
435 		return retval;
436 	}
437 	return err;
438 }
439 
440 static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
441 			   int datasync)
442 {
443 	struct p9_fid *fid;
444 	struct inode *inode = filp->f_mapping->host;
445 	struct p9_wstat wstat;
446 	int retval;
447 
448 	retval = file_write_and_wait_range(filp, start, end);
449 	if (retval)
450 		return retval;
451 
452 	inode_lock(inode);
453 	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
454 
455 	fid = filp->private_data;
456 	v9fs_blank_wstat(&wstat);
457 
458 	retval = p9_client_wstat(fid, &wstat);
459 	inode_unlock(inode);
460 
461 	return retval;
462 }
463 
464 int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
465 			 int datasync)
466 {
467 	struct p9_fid *fid;
468 	struct inode *inode = filp->f_mapping->host;
469 	int retval;
470 
471 	retval = file_write_and_wait_range(filp, start, end);
472 	if (retval)
473 		return retval;
474 
475 	inode_lock(inode);
476 	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
477 
478 	fid = filp->private_data;
479 
480 	retval = p9_client_fsync(fid, datasync);
481 	inode_unlock(inode);
482 
483 	return retval;
484 }
485 
/*
 * v9fs_file_mmap - mmap for 9P2000.L.  Without at least a loose (mmap)
 * cache only read-only mappings are allowed; otherwise a writeback fid is
 * set up for shared writable mappings and custom vm_ops are installed.
 */
486 static int
487 v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
488 {
489 	int retval;
490 	struct inode *inode = file_inode(filp);
491 	struct v9fs_inode *v9inode = V9FS_I(inode);
492 	struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
493 	struct p9_fid *fid;
494 
	/* No mmap-capable cache: drop cached pages and fall back to a
	 * read-only mapping. */
495 	if (v9ses->cache < CACHE_MMAP) {
496 		invalidate_inode_pages2(filp->f_mapping);
497 		return generic_file_readonly_mmap(filp, vma);
498 	}
499 
	/* Shared writable mapping: make sure the inode owns a writeback
	 * fid.  v_mutex serializes the check-and-create. */
500 	mutex_lock(&v9inode->v_mutex);
501 	if (!v9inode->writeback_fid &&
502 	    (vma->vm_flags & VM_SHARED) &&
503 	    (vma->vm_flags & VM_WRITE)) {
504 		/*
505 		 * clone a fid and add it to writeback_fid
506 		 * we do it during mmap instead of
507 		 * page dirty time via write_begin/page_mkwrite
508 		 * because we want write after unlink usecase
509 		 * to work.
510 		 */
511 		fid = v9fs_writeback_fid(file_dentry(filp));
512 		if (IS_ERR(fid)) {
513 			retval = PTR_ERR(fid);
514 			mutex_unlock(&v9inode->v_mutex);
515 			return retval;
516 		}
517 		v9inode->writeback_fid = (void *) fid;
518 	}
519 	mutex_unlock(&v9inode->v_mutex);
520 
	/* Install our vm_ops so the range is flushed on VMA close and
	 * page_mkwrite is intercepted. */
521 	retval = generic_file_mmap(filp, vma);
522 	if (!retval)
523 		vma->vm_ops = &v9fs_mmap_file_vm_ops;
524 
525 	return retval;
526 }
527 
/*
 * v9fs_vm_page_mkwrite - make a mapped folio writable: wait out any
 * fscache write, lock the folio, and confirm it is still attached to this
 * mapping before letting the fault proceed.
 */
528 static vm_fault_t
529 v9fs_vm_page_mkwrite(struct vm_fault *vmf)
530 {
531 	struct v9fs_inode *v9inode;
532 	struct folio *folio = page_folio(vmf->page);
533 	struct file *filp = vmf->vma->vm_file;
534 	struct inode *inode = file_inode(filp);
535 
536 
537 	p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
538 		 folio, (unsigned long)filp->private_data);
539 
540 	v9inode = V9FS_I(inode);
541 
542 	/* Wait for the page to be written to the cache before we allow it to
543 	 * be modified.  We then assume the entire page will need writing back.
544 	 */
545 #ifdef CONFIG_9P_FSCACHE
546 	if (folio_test_fscache(folio) &&
547 	    folio_wait_fscache_killable(folio) < 0)
548 		return VM_FAULT_NOPAGE;
549 #endif
550 
551 	/* Update file times before taking page lock */
552 	file_update_time(filp);
553 
	/* A writeback fid must have been set up at mmap time. */
554 	BUG_ON(!v9inode->writeback_fid);
555 	if (folio_lock_killable(folio) < 0)
556 		return VM_FAULT_RETRY;
	/* Folio was truncated/invalidated while we waited for the lock. */
557 	if (folio_mapping(folio) != inode->i_mapping)
558 		goto out_unlock;
559 	folio_wait_stable(folio);
560 
561 	return VM_FAULT_LOCKED;
562 out_unlock:
563 	folio_unlock(folio);
564 	return VM_FAULT_NOPAGE;
565 }
566 
/*
 * v9fs_mmap_vm_close - on teardown of a shared mapping, synchronously
 * write back exactly the byte range the VMA covered.
 */
567 static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
568 {
569 	struct inode *inode;
570 
571 	struct writeback_control wbc = {
572 		.nr_to_write = LONG_MAX,
573 		.sync_mode = WB_SYNC_ALL,
		/* Byte offset of the mapping's start within the file. */
574 		.range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
575 		 /* absolute end, byte at end included */
576 		.range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
577 			(vma->vm_end - vma->vm_start - 1),
578 	};
579 
	/* Private mappings carry no shared dirty data to flush. */
580 	if (!(vma->vm_flags & VM_SHARED))
581 		return;
582 
583 	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
584 
585 	inode = file_inode(vma->vm_file);
586 	filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
587 }
588 
/* VMA operations installed by v9fs_file_mmap for cached mappings. */
589 static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
590 	.close = v9fs_mmap_vm_close,
591 	.fault = filemap_fault,
592 	.map_pages = filemap_map_pages,
593 	.page_mkwrite = v9fs_vm_page_mkwrite,
594 };
595 
/* File operations for legacy 9P2000/9P2000.u mounts (local-only locking,
 * read-only mmap). */
596 const struct file_operations v9fs_file_operations = {
597 	.llseek = generic_file_llseek,
598 	.read_iter = v9fs_file_read_iter,
599 	.write_iter = v9fs_file_write_iter,
600 	.open = v9fs_file_open,
601 	.release = v9fs_dir_release,
602 	.lock = v9fs_file_lock,
603 	.mmap = generic_file_readonly_mmap,
604 	.splice_read = generic_file_splice_read,
605 	.splice_write = iter_file_splice_write,
606 	.fsync = v9fs_file_fsync,
607 };
608 
/* File operations for 9P2000.L mounts (server-side locks, flock, and
 * writable mmap support). */
609 const struct file_operations v9fs_file_operations_dotl = {
610 	.llseek = generic_file_llseek,
611 	.read_iter = v9fs_file_read_iter,
612 	.write_iter = v9fs_file_write_iter,
613 	.open = v9fs_file_open,
614 	.release = v9fs_dir_release,
615 	.lock = v9fs_file_lock_dotl,
616 	.flock = v9fs_file_flock_dotl,
617 	.mmap = v9fs_file_mmap,
618 	.splice_read = generic_file_splice_read,
619 	.splice_write = iter_file_splice_write,
620 	.fsync = v9fs_file_fsync_dotl,
621 };
622