1 /* 2 * linux/fs/9p/vfs_file.c 3 * 4 * This file contians vfs file ops for 9P2000. 5 * 6 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> 7 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 11 * as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to: 20 * Free Software Foundation 21 * 51 Franklin Street, Fifth Floor 22 * Boston, MA 02111-1301 USA 23 * 24 */ 25 26 #include <linux/module.h> 27 #include <linux/errno.h> 28 #include <linux/fs.h> 29 #include <linux/sched.h> 30 #include <linux/file.h> 31 #include <linux/stat.h> 32 #include <linux/string.h> 33 #include <linux/inet.h> 34 #include <linux/list.h> 35 #include <linux/pagemap.h> 36 #include <linux/utsname.h> 37 #include <asm/uaccess.h> 38 #include <linux/idr.h> 39 #include <net/9p/9p.h> 40 #include <net/9p/client.h> 41 42 #include "v9fs.h" 43 #include "v9fs_vfs.h" 44 #include "fid.h" 45 #include "cache.h" 46 47 static const struct vm_operations_struct v9fs_file_vm_ops; 48 static const struct vm_operations_struct v9fs_mmap_file_vm_ops; 49 50 /** 51 * v9fs_file_open - open a file (or directory) 52 * @inode: inode to be opened 53 * @file: file being opened 54 * 55 */ 56 57 int v9fs_file_open(struct inode *inode, struct file *file) 58 { 59 int err; 60 struct v9fs_inode *v9inode; 61 struct v9fs_session_info *v9ses; 62 struct p9_fid *fid; 63 int omode; 64 65 p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file); 66 v9inode = V9FS_I(inode); 67 v9ses = v9fs_inode2v9ses(inode); 68 if 
(v9fs_proto_dotl(v9ses)) 69 omode = v9fs_open_to_dotl_flags(file->f_flags); 70 else 71 omode = v9fs_uflags2omode(file->f_flags, 72 v9fs_proto_dotu(v9ses)); 73 fid = file->private_data; 74 if (!fid) { 75 fid = v9fs_fid_clone(file->f_path.dentry); 76 if (IS_ERR(fid)) 77 return PTR_ERR(fid); 78 79 err = p9_client_open(fid, omode); 80 if (err < 0) { 81 p9_client_clunk(fid); 82 return err; 83 } 84 if ((file->f_flags & O_APPEND) && 85 (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses))) 86 generic_file_llseek(file, 0, SEEK_END); 87 } 88 89 file->private_data = fid; 90 mutex_lock(&v9inode->v_mutex); 91 if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) && 92 !v9inode->writeback_fid && 93 ((file->f_flags & O_ACCMODE) != O_RDONLY)) { 94 /* 95 * clone a fid and add it to writeback_fid 96 * we do it during open time instead of 97 * page dirty time via write_begin/page_mkwrite 98 * because we want write after unlink usecase 99 * to work. 100 */ 101 fid = v9fs_writeback_fid(file->f_path.dentry); 102 if (IS_ERR(fid)) { 103 err = PTR_ERR(fid); 104 mutex_unlock(&v9inode->v_mutex); 105 goto out_error; 106 } 107 v9inode->writeback_fid = (void *) fid; 108 } 109 mutex_unlock(&v9inode->v_mutex); 110 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) 111 v9fs_cache_inode_set_cookie(inode, file); 112 return 0; 113 out_error: 114 p9_client_clunk(file->private_data); 115 file->private_data = NULL; 116 return err; 117 } 118 119 /** 120 * v9fs_file_lock - lock a file (or directory) 121 * @filp: file to be locked 122 * @cmd: lock command 123 * @fl: file lock structure 124 * 125 * Bugs: this looks like a local only lock, we should extend into 9P 126 * by using open exclusive 127 */ 128 129 static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl) 130 { 131 int res = 0; 132 struct inode *inode = file_inode(filp); 133 134 p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl); 135 136 /* No mandatory locks */ 137 if (__mandatory_lock(inode) && 
fl->fl_type != F_UNLCK) 138 return -ENOLCK; 139 140 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { 141 filemap_write_and_wait(inode->i_mapping); 142 invalidate_mapping_pages(&inode->i_data, 0, -1); 143 } 144 145 return res; 146 } 147 148 static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) 149 { 150 struct p9_flock flock; 151 struct p9_fid *fid; 152 uint8_t status; 153 int res = 0; 154 unsigned char fl_type; 155 156 fid = filp->private_data; 157 BUG_ON(fid == NULL); 158 159 if ((fl->fl_flags & FL_POSIX) != FL_POSIX) 160 BUG(); 161 162 res = posix_lock_file_wait(filp, fl); 163 if (res < 0) 164 goto out; 165 166 /* convert posix lock to p9 tlock args */ 167 memset(&flock, 0, sizeof(flock)); 168 /* map the lock type */ 169 switch (fl->fl_type) { 170 case F_RDLCK: 171 flock.type = P9_LOCK_TYPE_RDLCK; 172 break; 173 case F_WRLCK: 174 flock.type = P9_LOCK_TYPE_WRLCK; 175 break; 176 case F_UNLCK: 177 flock.type = P9_LOCK_TYPE_UNLCK; 178 break; 179 } 180 flock.start = fl->fl_start; 181 if (fl->fl_end == OFFSET_MAX) 182 flock.length = 0; 183 else 184 flock.length = fl->fl_end - fl->fl_start + 1; 185 flock.proc_id = fl->fl_pid; 186 flock.client_id = fid->clnt->name; 187 if (IS_SETLKW(cmd)) 188 flock.flags = P9_LOCK_FLAGS_BLOCK; 189 190 /* 191 * if its a blocked request and we get P9_LOCK_BLOCKED as the status 192 * for lock request, keep on trying 193 */ 194 for (;;) { 195 res = p9_client_lock_dotl(fid, &flock, &status); 196 if (res < 0) 197 goto out_unlock; 198 199 if (status != P9_LOCK_BLOCKED) 200 break; 201 if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd)) 202 break; 203 if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0) 204 break; 205 } 206 207 /* map 9p status to VFS status */ 208 switch (status) { 209 case P9_LOCK_SUCCESS: 210 res = 0; 211 break; 212 case P9_LOCK_BLOCKED: 213 res = -EAGAIN; 214 break; 215 default: 216 WARN_ONCE(1, "unknown lock status code: %d\n", status); 217 /* fallthough */ 218 case 
P9_LOCK_ERROR: 219 case P9_LOCK_GRACE: 220 res = -ENOLCK; 221 break; 222 } 223 224 out_unlock: 225 /* 226 * incase server returned error for lock request, revert 227 * it locally 228 */ 229 if (res < 0 && fl->fl_type != F_UNLCK) { 230 fl_type = fl->fl_type; 231 fl->fl_type = F_UNLCK; 232 res = posix_lock_file_wait(filp, fl); 233 fl->fl_type = fl_type; 234 } 235 out: 236 return res; 237 } 238 239 static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) 240 { 241 struct p9_getlock glock; 242 struct p9_fid *fid; 243 int res = 0; 244 245 fid = filp->private_data; 246 BUG_ON(fid == NULL); 247 248 posix_test_lock(filp, fl); 249 /* 250 * if we have a conflicting lock locally, no need to validate 251 * with server 252 */ 253 if (fl->fl_type != F_UNLCK) 254 return res; 255 256 /* convert posix lock to p9 tgetlock args */ 257 memset(&glock, 0, sizeof(glock)); 258 glock.type = P9_LOCK_TYPE_UNLCK; 259 glock.start = fl->fl_start; 260 if (fl->fl_end == OFFSET_MAX) 261 glock.length = 0; 262 else 263 glock.length = fl->fl_end - fl->fl_start + 1; 264 glock.proc_id = fl->fl_pid; 265 glock.client_id = fid->clnt->name; 266 267 res = p9_client_getlock_dotl(fid, &glock); 268 if (res < 0) 269 return res; 270 /* map 9p lock type to os lock type */ 271 switch (glock.type) { 272 case P9_LOCK_TYPE_RDLCK: 273 fl->fl_type = F_RDLCK; 274 break; 275 case P9_LOCK_TYPE_WRLCK: 276 fl->fl_type = F_WRLCK; 277 break; 278 case P9_LOCK_TYPE_UNLCK: 279 fl->fl_type = F_UNLCK; 280 break; 281 } 282 if (glock.type != P9_LOCK_TYPE_UNLCK) { 283 fl->fl_start = glock.start; 284 if (glock.length == 0) 285 fl->fl_end = OFFSET_MAX; 286 else 287 fl->fl_end = glock.start + glock.length - 1; 288 fl->fl_pid = glock.proc_id; 289 } 290 return res; 291 } 292 293 /** 294 * v9fs_file_lock_dotl - lock a file (or directory) 295 * @filp: file to be locked 296 * @cmd: lock command 297 * @fl: file lock structure 298 * 299 */ 300 301 static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl) 302 
{ 303 struct inode *inode = file_inode(filp); 304 int ret = -ENOLCK; 305 306 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n", 307 filp, cmd, fl, filp); 308 309 /* No mandatory locks */ 310 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) 311 goto out_err; 312 313 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { 314 filemap_write_and_wait(inode->i_mapping); 315 invalidate_mapping_pages(&inode->i_data, 0, -1); 316 } 317 318 if (IS_SETLK(cmd) || IS_SETLKW(cmd)) 319 ret = v9fs_file_do_lock(filp, cmd, fl); 320 else if (IS_GETLK(cmd)) 321 ret = v9fs_file_getlock(filp, fl); 322 else 323 ret = -EINVAL; 324 out_err: 325 return ret; 326 } 327 328 /** 329 * v9fs_file_flock_dotl - lock a file 330 * @filp: file to be locked 331 * @cmd: lock command 332 * @fl: file lock structure 333 * 334 */ 335 336 static int v9fs_file_flock_dotl(struct file *filp, int cmd, 337 struct file_lock *fl) 338 { 339 struct inode *inode = file_inode(filp); 340 int ret = -ENOLCK; 341 342 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n", 343 filp, cmd, fl, filp); 344 345 /* No mandatory locks */ 346 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) 347 goto out_err; 348 349 if (!(fl->fl_flags & FL_FLOCK)) 350 goto out_err; 351 352 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { 353 filemap_write_and_wait(inode->i_mapping); 354 invalidate_mapping_pages(&inode->i_data, 0, -1); 355 } 356 /* Convert flock to posix lock */ 357 fl->fl_flags |= FL_POSIX; 358 fl->fl_flags ^= FL_FLOCK; 359 360 if (IS_SETLK(cmd) | IS_SETLKW(cmd)) 361 ret = v9fs_file_do_lock(filp, cmd, fl); 362 else 363 ret = -EINVAL; 364 out_err: 365 return ret; 366 } 367 368 /** 369 * v9fs_fid_readn - read from a fid 370 * @fid: fid to read 371 * @data: data buffer to read data into 372 * @udata: user data buffer to read data into 373 * @count: size of buffer 374 * @offset: offset at which to read data 375 * 376 */ 377 ssize_t 378 v9fs_fid_readn(struct p9_fid *fid, char 
*data, char __user *udata, u32 count, 379 u64 offset) 380 { 381 int n, total, size; 382 383 p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", 384 fid->fid, (long long unsigned)offset, count); 385 n = 0; 386 total = 0; 387 size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ; 388 do { 389 n = p9_client_read(fid, data, udata, offset, count); 390 if (n <= 0) 391 break; 392 393 if (data) 394 data += n; 395 if (udata) 396 udata += n; 397 398 offset += n; 399 count -= n; 400 total += n; 401 } while (count > 0 && n == size); 402 403 if (n < 0) 404 total = n; 405 406 return total; 407 } 408 409 /** 410 * v9fs_file_readn - read from a file 411 * @filp: file pointer to read 412 * @data: data buffer to read data into 413 * @udata: user data buffer to read data into 414 * @count: size of buffer 415 * @offset: offset at which to read data 416 * 417 */ 418 ssize_t 419 v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count, 420 u64 offset) 421 { 422 return v9fs_fid_readn(filp->private_data, data, udata, count, offset); 423 } 424 425 /** 426 * v9fs_file_read - read from a file 427 * @filp: file pointer to read 428 * @udata: user data buffer to read data into 429 * @count: size of buffer 430 * @offset: offset at which to read data 431 * 432 */ 433 434 static ssize_t 435 v9fs_file_read(struct file *filp, char __user *udata, size_t count, 436 loff_t * offset) 437 { 438 int ret; 439 struct p9_fid *fid; 440 size_t size; 441 442 p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset); 443 fid = filp->private_data; 444 445 size = fid->iounit ? 
fid->iounit : fid->clnt->msize - P9_IOHDRSZ; 446 if (count > size) 447 ret = v9fs_file_readn(filp, NULL, udata, count, *offset); 448 else 449 ret = p9_client_read(fid, NULL, udata, *offset, count); 450 451 if (ret > 0) 452 *offset += ret; 453 454 return ret; 455 } 456 457 ssize_t 458 v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid, 459 const char __user *data, size_t count, 460 loff_t *offset, int invalidate) 461 { 462 int n; 463 loff_t i_size; 464 size_t total = 0; 465 loff_t origin = *offset; 466 unsigned long pg_start, pg_end; 467 468 p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n", 469 data, (int)count, (int)*offset); 470 471 do { 472 n = p9_client_write(fid, NULL, data+total, origin+total, count); 473 if (n <= 0) 474 break; 475 count -= n; 476 total += n; 477 } while (count > 0); 478 479 if (invalidate && (total > 0)) { 480 pg_start = origin >> PAGE_CACHE_SHIFT; 481 pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT; 482 if (inode->i_mapping && inode->i_mapping->nrpages) 483 invalidate_inode_pages2_range(inode->i_mapping, 484 pg_start, pg_end); 485 *offset += total; 486 i_size = i_size_read(inode); 487 if (*offset > i_size) { 488 inode_add_bytes(inode, *offset - i_size); 489 i_size_write(inode, *offset); 490 } 491 } 492 if (n < 0) 493 return n; 494 495 return total; 496 } 497 498 /** 499 * v9fs_file_write - write to a file 500 * @filp: file pointer to write 501 * @data: data buffer to write data from 502 * @count: size of buffer 503 * @offset: offset at which to write data 504 * 505 */ 506 static ssize_t 507 v9fs_file_write(struct file *filp, const char __user * data, 508 size_t count, loff_t *offset) 509 { 510 ssize_t retval = 0; 511 loff_t origin = *offset; 512 513 514 retval = generic_write_checks(filp, &origin, &count, 0); 515 if (retval) 516 goto out; 517 518 retval = -EINVAL; 519 if ((ssize_t) count < 0) 520 goto out; 521 retval = 0; 522 if (!count) 523 goto out; 524 525 retval = v9fs_file_write_internal(file_inode(filp), 526 
filp->private_data, 527 data, count, &origin, 1); 528 /* update offset on successful write */ 529 if (retval > 0) 530 *offset = origin; 531 out: 532 return retval; 533 } 534 535 536 static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end, 537 int datasync) 538 { 539 struct p9_fid *fid; 540 struct inode *inode = filp->f_mapping->host; 541 struct p9_wstat wstat; 542 int retval; 543 544 retval = filemap_write_and_wait_range(inode->i_mapping, start, end); 545 if (retval) 546 return retval; 547 548 mutex_lock(&inode->i_mutex); 549 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); 550 551 fid = filp->private_data; 552 v9fs_blank_wstat(&wstat); 553 554 retval = p9_client_wstat(fid, &wstat); 555 mutex_unlock(&inode->i_mutex); 556 557 return retval; 558 } 559 560 int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, 561 int datasync) 562 { 563 struct p9_fid *fid; 564 struct inode *inode = filp->f_mapping->host; 565 int retval; 566 567 retval = filemap_write_and_wait_range(inode->i_mapping, start, end); 568 if (retval) 569 return retval; 570 571 mutex_lock(&inode->i_mutex); 572 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); 573 574 fid = filp->private_data; 575 576 retval = p9_client_fsync(fid, datasync); 577 mutex_unlock(&inode->i_mutex); 578 579 return retval; 580 } 581 582 static int 583 v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma) 584 { 585 int retval; 586 587 588 retval = generic_file_mmap(filp, vma); 589 if (!retval) 590 vma->vm_ops = &v9fs_file_vm_ops; 591 592 return retval; 593 } 594 595 static int 596 v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma) 597 { 598 int retval; 599 struct inode *inode; 600 struct v9fs_inode *v9inode; 601 struct p9_fid *fid; 602 603 inode = file_inode(filp); 604 v9inode = V9FS_I(inode); 605 mutex_lock(&v9inode->v_mutex); 606 if (!v9inode->writeback_fid && 607 (vma->vm_flags & VM_WRITE)) { 608 /* 609 * clone a fid and add it to writeback_fid 
610 * we do it during mmap instead of 611 * page dirty time via write_begin/page_mkwrite 612 * because we want write after unlink usecase 613 * to work. 614 */ 615 fid = v9fs_writeback_fid(filp->f_path.dentry); 616 if (IS_ERR(fid)) { 617 retval = PTR_ERR(fid); 618 mutex_unlock(&v9inode->v_mutex); 619 return retval; 620 } 621 v9inode->writeback_fid = (void *) fid; 622 } 623 mutex_unlock(&v9inode->v_mutex); 624 625 retval = generic_file_mmap(filp, vma); 626 if (!retval) 627 vma->vm_ops = &v9fs_mmap_file_vm_ops; 628 629 return retval; 630 } 631 632 static int 633 v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 634 { 635 struct v9fs_inode *v9inode; 636 struct page *page = vmf->page; 637 struct file *filp = vma->vm_file; 638 struct inode *inode = file_inode(filp); 639 640 641 p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n", 642 page, (unsigned long)filp->private_data); 643 644 /* Update file times before taking page lock */ 645 file_update_time(filp); 646 647 v9inode = V9FS_I(inode); 648 /* make sure the cache has finished storing the page */ 649 v9fs_fscache_wait_on_page_write(inode, page); 650 BUG_ON(!v9inode->writeback_fid); 651 lock_page(page); 652 if (page->mapping != inode->i_mapping) 653 goto out_unlock; 654 wait_for_stable_page(page); 655 656 return VM_FAULT_LOCKED; 657 out_unlock: 658 unlock_page(page); 659 return VM_FAULT_NOPAGE; 660 } 661 662 static ssize_t 663 v9fs_direct_read(struct file *filp, char __user *udata, size_t count, 664 loff_t *offsetp) 665 { 666 loff_t size, offset; 667 struct inode *inode; 668 struct address_space *mapping; 669 670 offset = *offsetp; 671 mapping = filp->f_mapping; 672 inode = mapping->host; 673 if (!count) 674 return 0; 675 size = i_size_read(inode); 676 if (offset < size) 677 filemap_write_and_wait_range(mapping, offset, 678 offset + count - 1); 679 680 return v9fs_file_read(filp, udata, count, offsetp); 681 } 682 683 /** 684 * v9fs_cached_file_read - read from a file 685 * @filp: file pointer to read 686 * 
@data: user data buffer to read data into 687 * @count: size of buffer 688 * @offset: offset at which to read data 689 * 690 */ 691 static ssize_t 692 v9fs_cached_file_read(struct file *filp, char __user *data, size_t count, 693 loff_t *offset) 694 { 695 if (filp->f_flags & O_DIRECT) 696 return v9fs_direct_read(filp, data, count, offset); 697 return new_sync_read(filp, data, count, offset); 698 } 699 700 /** 701 * v9fs_mmap_file_read - read from a file 702 * @filp: file pointer to read 703 * @data: user data buffer to read data into 704 * @count: size of buffer 705 * @offset: offset at which to read data 706 * 707 */ 708 static ssize_t 709 v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count, 710 loff_t *offset) 711 { 712 /* TODO: Check if there are dirty pages */ 713 return v9fs_file_read(filp, data, count, offset); 714 } 715 716 static ssize_t 717 v9fs_direct_write(struct file *filp, const char __user * data, 718 size_t count, loff_t *offsetp) 719 { 720 loff_t offset; 721 ssize_t retval; 722 struct inode *inode; 723 struct address_space *mapping; 724 725 offset = *offsetp; 726 mapping = filp->f_mapping; 727 inode = mapping->host; 728 if (!count) 729 return 0; 730 731 mutex_lock(&inode->i_mutex); 732 retval = filemap_write_and_wait_range(mapping, offset, 733 offset + count - 1); 734 if (retval) 735 goto err_out; 736 /* 737 * After a write we want buffered reads to be sure to go to disk to get 738 * the new data. We invalidate clean cached page from the region we're 739 * about to write. We do this *before* the write so that if we fail 740 * here we fall back to buffered write 741 */ 742 if (mapping->nrpages) { 743 pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT; 744 pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT; 745 746 retval = invalidate_inode_pages2_range(mapping, 747 pg_start, pg_end); 748 /* 749 * If a page can not be invalidated, fall back 750 * to buffered write. 
751 */ 752 if (retval) { 753 if (retval == -EBUSY) 754 goto buff_write; 755 goto err_out; 756 } 757 } 758 retval = v9fs_file_write(filp, data, count, offsetp); 759 err_out: 760 mutex_unlock(&inode->i_mutex); 761 return retval; 762 763 buff_write: 764 mutex_unlock(&inode->i_mutex); 765 return new_sync_write(filp, data, count, offsetp); 766 } 767 768 /** 769 * v9fs_cached_file_write - write to a file 770 * @filp: file pointer to write 771 * @data: data buffer to write data from 772 * @count: size of buffer 773 * @offset: offset at which to write data 774 * 775 */ 776 static ssize_t 777 v9fs_cached_file_write(struct file *filp, const char __user * data, 778 size_t count, loff_t *offset) 779 { 780 781 if (filp->f_flags & O_DIRECT) 782 return v9fs_direct_write(filp, data, count, offset); 783 return new_sync_write(filp, data, count, offset); 784 } 785 786 787 /** 788 * v9fs_mmap_file_write - write to a file 789 * @filp: file pointer to write 790 * @data: data buffer to write data from 791 * @count: size of buffer 792 * @offset: offset at which to write data 793 * 794 */ 795 static ssize_t 796 v9fs_mmap_file_write(struct file *filp, const char __user *data, 797 size_t count, loff_t *offset) 798 { 799 /* 800 * TODO: invalidate mmaps on filp's inode between 801 * offset and offset+count 802 */ 803 return v9fs_file_write(filp, data, count, offset); 804 } 805 806 static void v9fs_mmap_vm_close(struct vm_area_struct *vma) 807 { 808 struct inode *inode; 809 810 struct writeback_control wbc = { 811 .nr_to_write = LONG_MAX, 812 .sync_mode = WB_SYNC_ALL, 813 .range_start = vma->vm_pgoff * PAGE_SIZE, 814 /* absolute end, byte at end included */ 815 .range_end = vma->vm_pgoff * PAGE_SIZE + 816 (vma->vm_end - vma->vm_start - 1), 817 }; 818 819 820 p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma); 821 822 inode = file_inode(vma->vm_file); 823 824 if (!mapping_cap_writeback_dirty(inode->i_mapping)) 825 wbc.nr_to_write = 0; 826 827 might_sleep(); 828 sync_inode(inode, &wbc); 
829 } 830 831 832 static const struct vm_operations_struct v9fs_file_vm_ops = { 833 .fault = filemap_fault, 834 .map_pages = filemap_map_pages, 835 .page_mkwrite = v9fs_vm_page_mkwrite, 836 }; 837 838 static const struct vm_operations_struct v9fs_mmap_file_vm_ops = { 839 .close = v9fs_mmap_vm_close, 840 .fault = filemap_fault, 841 .map_pages = filemap_map_pages, 842 .page_mkwrite = v9fs_vm_page_mkwrite, 843 }; 844 845 846 const struct file_operations v9fs_cached_file_operations = { 847 .llseek = generic_file_llseek, 848 .read = v9fs_cached_file_read, 849 .write = v9fs_cached_file_write, 850 .read_iter = generic_file_read_iter, 851 .write_iter = generic_file_write_iter, 852 .open = v9fs_file_open, 853 .release = v9fs_dir_release, 854 .lock = v9fs_file_lock, 855 .mmap = v9fs_file_mmap, 856 .fsync = v9fs_file_fsync, 857 }; 858 859 const struct file_operations v9fs_cached_file_operations_dotl = { 860 .llseek = generic_file_llseek, 861 .read = v9fs_cached_file_read, 862 .write = v9fs_cached_file_write, 863 .read_iter = generic_file_read_iter, 864 .write_iter = generic_file_write_iter, 865 .open = v9fs_file_open, 866 .release = v9fs_dir_release, 867 .lock = v9fs_file_lock_dotl, 868 .flock = v9fs_file_flock_dotl, 869 .mmap = v9fs_file_mmap, 870 .fsync = v9fs_file_fsync_dotl, 871 }; 872 873 const struct file_operations v9fs_file_operations = { 874 .llseek = generic_file_llseek, 875 .read = v9fs_file_read, 876 .write = v9fs_file_write, 877 .open = v9fs_file_open, 878 .release = v9fs_dir_release, 879 .lock = v9fs_file_lock, 880 .mmap = generic_file_readonly_mmap, 881 .fsync = v9fs_file_fsync, 882 }; 883 884 const struct file_operations v9fs_file_operations_dotl = { 885 .llseek = generic_file_llseek, 886 .read = v9fs_file_read, 887 .write = v9fs_file_write, 888 .open = v9fs_file_open, 889 .release = v9fs_dir_release, 890 .lock = v9fs_file_lock_dotl, 891 .flock = v9fs_file_flock_dotl, 892 .mmap = generic_file_readonly_mmap, 893 .fsync = v9fs_file_fsync_dotl, 894 }; 895 896 
/* file ops for cache=mmap mounts: legacy 9P2000/9P2000.u protocol */
const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read = v9fs_mmap_file_read,
	.write = v9fs_mmap_file_write,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.fsync = v9fs_file_fsync,
};

/* file ops for cache=mmap mounts: 9P2000.L protocol (adds flock, TFSYNC) */
const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read = v9fs_mmap_file_read,
	.write = v9fs_mmap_file_write,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.fsync = v9fs_file_fsync_dotl,
};