// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs file ops for 9P2000.
 *
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"

static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;

/**
 * v9fs_file_open - open a file (or directory)
 * @inode: inode to be opened
 * @file: file being opened
 *
 */

int v9fs_file_open(struct inode *inode, struct file *file)
{
        int err;
        struct v9fs_inode *v9inode;
        struct v9fs_session_info *v9ses;
        struct p9_fid *fid, *writeback_fid;
        int omode;

        p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
        v9inode = V9FS_I(inode);
        v9ses = v9fs_inode2v9ses(inode);
        if (v9fs_proto_dotl(v9ses))
                omode = v9fs_open_to_dotl_flags(file->f_flags);
        else
                omode = v9fs_uflags2omode(file->f_flags,
                                          v9fs_proto_dotu(v9ses));
        fid = file->private_data;
        if (!fid) {
                fid = v9fs_fid_clone(file_dentry(file));
                if (IS_ERR(fid))
                        return PTR_ERR(fid);

                err = p9_client_open(fid, omode);
                if (err < 0) {
                        p9_client_clunk(fid);
                        return err;
                }
                if ((file->f_flags & O_APPEND) &&
                    (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
                        generic_file_llseek(file, 0, SEEK_END);
        }

        file->private_data = fid;
        mutex_lock(&v9inode->v_mutex);
        if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
            !v9inode->writeback_fid &&
            ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
                /*
                 * Clone a fid and add it to writeback_fid.  We do it at
                 * open time instead of page dirty time (write_begin/
                 * page_mkwrite) because we want the write-after-unlink
                 * use case to work.
                 */
                writeback_fid = v9fs_writeback_fid(file_dentry(file));
                if (IS_ERR(writeback_fid)) {
                        err = PTR_ERR(writeback_fid);
                        mutex_unlock(&v9inode->v_mutex);
                        goto out_error;
                }
                v9inode->writeback_fid = (void *) writeback_fid;
        }
        mutex_unlock(&v9inode->v_mutex);
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
                v9fs_cache_inode_set_cookie(inode, file);
        v9fs_open_fid_add(inode, fid);
        return 0;
out_error:
        p9_client_clunk(file->private_data);
        file->private_data = NULL;
        return err;
}

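/*
 * Locking: plain 9P2000/9P2000.u has no lock request in the protocol, so
 * v9fs_file_lock() below only writes back and invalidates cached pages and
 * never contacts the server (see the Bugs note in its kernel-doc).  The
 * 9P2000.L paths map POSIX and flock locks onto TLOCK/TGETLOCK requests
 * through v9fs_file_do_lock() and v9fs_file_getlock().
 */
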
/**
 * v9fs_file_lock - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * Bugs: this looks like a local-only lock, we should extend into 9P
 *       by using open exclusive
 */

static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
        int res = 0;
        struct inode *inode = file_inode(filp);

        p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);

        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
        }

        return res;
}

static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
        struct p9_flock flock;
        struct p9_fid *fid;
        uint8_t status = P9_LOCK_ERROR;
        int res = 0;
        unsigned char fl_type;
        struct v9fs_session_info *v9ses;

        fid = filp->private_data;
        BUG_ON(fid == NULL);

        if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
                BUG();

        res = locks_lock_file_wait(filp, fl);
        if (res < 0)
                goto out;

        /* convert posix lock to p9 tlock args */
        memset(&flock, 0, sizeof(flock));
        /* map the lock type */
        switch (fl->fl_type) {
        case F_RDLCK:
                flock.type = P9_LOCK_TYPE_RDLCK;
                break;
        case F_WRLCK:
                flock.type = P9_LOCK_TYPE_WRLCK;
                break;
        case F_UNLCK:
                flock.type = P9_LOCK_TYPE_UNLCK;
                break;
        }
        flock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                flock.length = 0;
        else
                flock.length = fl->fl_end - fl->fl_start + 1;
        flock.proc_id = fl->fl_pid;
        flock.client_id = fid->clnt->name;
        if (IS_SETLKW(cmd))
                flock.flags = P9_LOCK_FLAGS_BLOCK;

        v9ses = v9fs_inode2v9ses(file_inode(filp));

        /*
         * If it's a blocking request and we get P9_LOCK_BLOCKED as the
         * status for the lock request, keep on trying.
         */
        for (;;) {
                res = p9_client_lock_dotl(fid, &flock, &status);
                if (res < 0)
                        goto out_unlock;

                if (status != P9_LOCK_BLOCKED)
                        break;
                if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
                        break;
                if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
                                != 0)
                        break;
                /*
                 * p9_client_lock_dotl overwrites flock.client_id with the
                 * server message, free and reuse the client name
                 */
                if (flock.client_id != fid->clnt->name) {
                        kfree(flock.client_id);
                        flock.client_id = fid->clnt->name;
                }
        }

        /* map 9p status to VFS status */
        switch (status) {
        case P9_LOCK_SUCCESS:
                res = 0;
                break;
        case P9_LOCK_BLOCKED:
                res = -EAGAIN;
                break;
        default:
                WARN_ONCE(1, "unknown lock status code: %d\n", status);
                fallthrough;
        case P9_LOCK_ERROR:
        case P9_LOCK_GRACE:
                res = -ENOLCK;
                break;
        }

out_unlock:
        /*
         * In case the server returned an error for the lock request,
         * revert it locally.
         */
        if (res < 0 && fl->fl_type != F_UNLCK) {
                fl_type = fl->fl_type;
                fl->fl_type = F_UNLCK;
                /* Even if this fails we want to return the remote error */
                locks_lock_file_wait(filp, fl);
                fl->fl_type = fl_type;
        }
        if (flock.client_id != fid->clnt->name)
                kfree(flock.client_id);
out:
        return res;
}

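/*
 * v9fs_file_getlock - handle F_GETLK over 9P2000.L.  A local
 * posix_test_lock() runs first; the server is only asked via TGETLOCK when
 * no local conflict exists.  If the server reports a conflict, the owner's
 * pid is returned negated, since it refers to a process on the server
 * rather than one in this client's pid namespace.
 */
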
static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
        struct p9_getlock glock;
        struct p9_fid *fid;
        int res = 0;

        fid = filp->private_data;
        BUG_ON(fid == NULL);

        posix_test_lock(filp, fl);
        /*
         * if we have a conflicting lock locally, no need to validate
         * with server
         */
        if (fl->fl_type != F_UNLCK)
                return res;

        /* convert posix lock to p9 tgetlock args */
        memset(&glock, 0, sizeof(glock));
        glock.type = P9_LOCK_TYPE_UNLCK;
        glock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                glock.length = 0;
        else
                glock.length = fl->fl_end - fl->fl_start + 1;
        glock.proc_id = fl->fl_pid;
        glock.client_id = fid->clnt->name;

        res = p9_client_getlock_dotl(fid, &glock);
        if (res < 0)
                goto out;
        /* map 9p lock type to os lock type */
        switch (glock.type) {
        case P9_LOCK_TYPE_RDLCK:
                fl->fl_type = F_RDLCK;
                break;
        case P9_LOCK_TYPE_WRLCK:
                fl->fl_type = F_WRLCK;
                break;
        case P9_LOCK_TYPE_UNLCK:
                fl->fl_type = F_UNLCK;
                break;
        }
        if (glock.type != P9_LOCK_TYPE_UNLCK) {
                fl->fl_start = glock.start;
                if (glock.length == 0)
                        fl->fl_end = OFFSET_MAX;
                else
                        fl->fl_end = glock.start + glock.length - 1;
                fl->fl_pid = -glock.proc_id;
        }
out:
        if (glock.client_id != fid->clnt->name)
                kfree(glock.client_id);
        return res;
}

/**
 * v9fs_file_lock_dotl - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(filp);
        int ret = -ENOLCK;

        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
                 filp, cmd, fl, filp);

        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
        }

        if (IS_SETLK(cmd) || IS_SETLKW(cmd))
                ret = v9fs_file_do_lock(filp, cmd, fl);
        else if (IS_GETLK(cmd))
                ret = v9fs_file_getlock(filp, fl);
        else
                ret = -EINVAL;
        return ret;
}

/**
 * v9fs_file_flock_dotl - lock a file
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_flock_dotl(struct file *filp, int cmd,
                                struct file_lock *fl)
{
        struct inode *inode = file_inode(filp);
        int ret = -ENOLCK;

        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
                 filp, cmd, fl, filp);

        if (!(fl->fl_flags & FL_FLOCK))
                goto out_err;

        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
        }
        /* Convert flock to posix lock */
        fl->fl_flags |= FL_POSIX;
        fl->fl_flags ^= FL_FLOCK;

        if (IS_SETLK(cmd) || IS_SETLKW(cmd))
                ret = v9fs_file_do_lock(filp, cmd, fl);
        else
                ret = -EINVAL;
out_err:
        return ret;
}

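/*
 * Uncached read/write paths: v9fs_file_read_iter() and
 * v9fs_file_write_iter() go to the server through the 9P client for every
 * request rather than through the page cache.  They back the
 * v9fs_file_operations{,_dotl} tables at the bottom of this file and are
 * wrapped by the v9fs_mmap_* variants.
 */
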
/**
 * v9fs_file_read_iter - read from a file
 * @iocb: The operation parameters
 * @to: The buffer to read into
 *
 */
static ssize_t
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct p9_fid *fid = iocb->ki_filp->private_data;
        int ret, err = 0;

        p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
                 iov_iter_count(to), iocb->ki_pos);

        if (iocb->ki_filp->f_flags & O_NONBLOCK)
                ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
        else
                ret = p9_client_read(fid, iocb->ki_pos, to, &err);
        if (!ret)
                return err;

        iocb->ki_pos += ret;
        return ret;
}

/**
 * v9fs_file_write_iter - write to a file
 * @iocb: The operation parameters
 * @from: The data to write
 *
 */
static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        ssize_t retval;
        loff_t origin;
        int err = 0;

        retval = generic_write_checks(iocb, from);
        if (retval <= 0)
                return retval;

        origin = iocb->ki_pos;
        retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
        if (retval > 0) {
                struct inode *inode = file_inode(file);
                loff_t i_size;
                unsigned long pg_start, pg_end;

                pg_start = origin >> PAGE_SHIFT;
                pg_end = (origin + retval - 1) >> PAGE_SHIFT;
                if (inode->i_mapping && inode->i_mapping->nrpages)
                        invalidate_inode_pages2_range(inode->i_mapping,
                                                      pg_start, pg_end);
                iocb->ki_pos += retval;
                i_size = i_size_read(inode);
                if (iocb->ki_pos > i_size) {
                        inode_add_bytes(inode, iocb->ki_pos - i_size);
                        /*
                         * Need to serialize against i_size_write() in
                         * v9fs_stat2inode()
                         */
                        v9fs_i_size_write(inode, iocb->ki_pos);
                }
                return retval;
        }
        return err;
}

static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
                           int datasync)
{
        struct p9_fid *fid;
        struct inode *inode = filp->f_mapping->host;
        struct p9_wstat wstat;
        int retval;

        retval = file_write_and_wait_range(filp, start, end);
        if (retval)
                return retval;

        inode_lock(inode);
        p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

        fid = filp->private_data;
        /*
         * Legacy 9P has no explicit fsync request; a wstat with every field
         * left at its "don't touch" value asks the server to flush the file
         * to stable storage.
         */
        v9fs_blank_wstat(&wstat);

        retval = p9_client_wstat(fid, &wstat);
        inode_unlock(inode);

        return retval;
}

int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
                         int datasync)
{
        struct p9_fid *fid;
        struct inode *inode = filp->f_mapping->host;
        int retval;

        retval = file_write_and_wait_range(filp, start, end);
        if (retval)
                return retval;

        inode_lock(inode);
        p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

        fid = filp->private_data;

        retval = p9_client_fsync(fid, datasync);
        inode_unlock(inode);

        return retval;
}

static int
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int retval;

        retval = generic_file_mmap(filp, vma);
        if (!retval)
                vma->vm_ops = &v9fs_file_vm_ops;

        return retval;
}

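/*
 * The v9fs_mmap_* paths below allow shared writable mappings even though
 * regular I/O stays uncached: the mmap handler makes sure a writeback fid
 * exists before the mapping is set up, and v9fs_mmap_file_vm_ops writes the
 * mapped range back to the server when the VMA is closed.
 */
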
static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int retval;
        struct inode *inode;
        struct v9fs_inode *v9inode;
        struct p9_fid *fid;

        inode = file_inode(filp);
        v9inode = V9FS_I(inode);
        mutex_lock(&v9inode->v_mutex);
        if (!v9inode->writeback_fid &&
            (vma->vm_flags & VM_SHARED) &&
            (vma->vm_flags & VM_WRITE)) {
                /*
                 * Clone a fid and add it to writeback_fid.  We do it at
                 * mmap time instead of page dirty time (write_begin/
                 * page_mkwrite) because we want the write-after-unlink
                 * use case to work.
                 */
                fid = v9fs_writeback_fid(file_dentry(filp));
                if (IS_ERR(fid)) {
                        retval = PTR_ERR(fid);
                        mutex_unlock(&v9inode->v_mutex);
                        return retval;
                }
                v9inode->writeback_fid = (void *) fid;
        }
        mutex_unlock(&v9inode->v_mutex);

        retval = generic_file_mmap(filp, vma);
        if (!retval)
                vma->vm_ops = &v9fs_mmap_file_vm_ops;

        return retval;
}

static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
        struct v9fs_inode *v9inode;
        struct folio *folio = page_folio(vmf->page);
        struct file *filp = vmf->vma->vm_file;
        struct inode *inode = file_inode(filp);

        p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
                 folio, (unsigned long)filp->private_data);

        v9inode = V9FS_I(inode);

        /* Wait for the page to be written to the cache before we allow it to
         * be modified.  We then assume the entire page will need writing back.
         */
#ifdef CONFIG_9P_FSCACHE
        if (folio_test_fscache(folio) &&
            folio_wait_fscache_killable(folio) < 0)
                return VM_FAULT_NOPAGE;
#endif

        /* Update file times before taking page lock */
        file_update_time(filp);

        BUG_ON(!v9inode->writeback_fid);
        if (folio_lock_killable(folio) < 0)
                return VM_FAULT_RETRY;
        if (folio_mapping(folio) != inode->i_mapping)
                goto out_unlock;
        folio_wait_stable(folio);

        return VM_FAULT_LOCKED;
out_unlock:
        folio_unlock(folio);
        return VM_FAULT_NOPAGE;
}

/**
 * v9fs_mmap_file_read_iter - read from a file
 * @iocb: The operation parameters
 * @to: The buffer to read into
 *
 */
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        /* TODO: Check if there are dirty pages */
        return v9fs_file_read_iter(iocb, to);
}

/**
 * v9fs_mmap_file_write_iter - write to a file
 * @iocb: The operation parameters
 * @from: The data to write
 *
 */
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        /*
         * TODO: invalidate mmaps on filp's inode between
         * offset and offset+count
         */
        return v9fs_file_write_iter(iocb, from);
}

static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode;

        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = WB_SYNC_ALL,
                .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
                /* absolute end, byte at end included */
                .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
                        (vma->vm_end - vma->vm_start - 1),
        };

        if (!(vma->vm_flags & VM_SHARED))
                return;

        p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);

        inode = file_inode(vma->vm_file);
        filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
}

static const struct vm_operations_struct v9fs_file_vm_ops = {
        .fault = filemap_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = v9fs_vm_page_mkwrite,
};

static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
        .close = v9fs_mmap_vm_close,
        .fault = filemap_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = v9fs_vm_page_mkwrite,
};

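/*
 * Six file_operations tables are exported below, selected when inodes are
 * set up according to the mount's cache mode and protocol version:
 *   - v9fs_cached_file_operations{,_dotl}: page-cache backed I/O through
 *     the generic read/write iterators (cache=loose/fscache).
 *   - v9fs_file_operations{,_dotl}: uncached, every read/write goes to the
 *     server; mmap is read-only.
 *   - v9fs_mmap_file_operations{,_dotl}: uncached I/O plus shared writable
 *     mmap through the writeback machinery above.
 * The _dotl variants use the 9P2000.L lock handlers (v9fs_file_lock_dotl/
 * v9fs_file_flock_dotl) and TFSYNC-based fsync.
 */
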
const struct file_operations v9fs_cached_file_operations = {
        .llseek = generic_file_llseek,
        .read_iter = generic_file_read_iter,
        .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = v9fs_file_mmap,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_cached_file_operations_dotl = {
        .llseek = generic_file_llseek,
        .read_iter = generic_file_read_iter,
        .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
        .flock = v9fs_file_flock_dotl,
        .mmap = v9fs_file_mmap,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_file_operations = {
        .llseek = generic_file_llseek,
        .read_iter = v9fs_file_read_iter,
        .write_iter = v9fs_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = generic_file_readonly_mmap,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_file_operations_dotl = {
        .llseek = generic_file_llseek,
        .read_iter = v9fs_file_read_iter,
        .write_iter = v9fs_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
        .flock = v9fs_file_flock_dotl,
        .mmap = generic_file_readonly_mmap,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_mmap_file_operations = {
        .llseek = generic_file_llseek,
        .read_iter = v9fs_mmap_file_read_iter,
        .write_iter = v9fs_mmap_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = v9fs_mmap_file_mmap,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_mmap_file_operations_dotl = {
        .llseek = generic_file_llseek,
        .read_iter = v9fs_mmap_file_read_iter,
        .write_iter = v9fs_mmap_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
        .flock = v9fs_file_flock_dotl,
        .mmap = v9fs_mmap_file_mmap,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync_dotl,
};