/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
			  struct fuse_open_out *outargp)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(void)
{
	struct fuse_file *ff;
	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (ff) {
		ff->reserved_req = fuse_request_alloc();
		if (!ff->reserved_req) {
			kfree(ff);
			ff = NULL;
		} else {
			INIT_LIST_HEAD(&ff->write_entry);
			atomic_set(&ff->count, 0);
		}
	}
	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	dput(req->misc.release.dentry);
	mntput(req->misc.release.vfsmount);
	fuse_put_request(fc, req);
}

static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;
		struct inode *inode = req->misc.release.dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		req->end = fuse_release_end;
		request_send_background(fc, req);
		kfree(ff);
	}
}

void fuse_finish_open(struct inode *inode, struct file *file,
		      struct fuse_file *ff, struct fuse_open_out *outarg)
{
	if (outarg->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	ff->fh = outarg->fh;
	file->private_data = fuse_file_get(ff);
}

int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	ff = fuse_file_alloc();
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(inode, file, isdir, &outarg);
	if (err)
		fuse_file_free(ff);
	else {
		if (isdir)
			outarg.open_flags &= ~FOPEN_DIRECT_IO;
		fuse_finish_open(inode, file, ff, &outarg);
	}

	return err;
}

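/*
 * Fill in the RELEASE/RELEASEDIR request using the request reserved
 * at open time, so sending the release never has to allocate memory.
 */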
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_req *req = ff->reserved_req;

		fuse_release_fill(ff, get_node_id(inode), file->f_flags,
				  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished */
		req->misc.release.vfsmount = mntget(file->f_path.mnt);
		req->misc.release.dentry = dget(file->f_path.dentry);

		spin_lock(&fc->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fc->lock);
		/*
		 * Normally this will send the RELEASE request,
		 * however if some asynchronous READ or WRITE requests
		 * are outstanding, the sending will be delayed
		 */
		fuse_file_put(ff);
	}

	/* Return value is ignored by VFS */
	return 0;
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	return fuse_release_common(inode, file, 0);
}

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}

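/*
 * Fill in a read request for 'count' bytes at offset 'pos'.  The opcode
 * parameter selects FUSE_READ or FUSE_READDIR, which share this layout.
 */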
void fuse_read_fill(struct fuse_req *req, struct file *file,
		    struct inode *inode, loff_t pos, size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argpages = 1;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     struct inode *inode, loff_t pos, size_t count,
			     fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}

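/*
 * Completion callback for readpages requests: update i_size on a short
 * read, mark the pages up to date (or with an error) and unlock them.
 */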
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct inode *inode = req->pages[0]->mapping->host;

	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (!req->out.h.error && num_read < count) {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
	}

	fuse_invalidate_attr(inode); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff);
	fuse_put_request(fc, req);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		struct fuse_file *ff = file->private_data;
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		request_send_background(fc, req);
	} else {
		request_send(fc, req);
		fuse_readpages_end(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file, inode);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file, inode);
		else
			fuse_put_request(fc, data.req);
	}
 out:
	return err;
}

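/*
 * Cached read.  If the read would go past the cached i_size, refresh
 * the attributes first so EOF is detected correctly, then use the
 * generic page-cache read path.
 */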
static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct file *file,
			    struct fuse_file *ff, struct inode *inode,
			    loff_t pos, size_t count, int writepage)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	memset(inarg, 0, sizeof(struct fuse_write_in));
	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
	inarg->flags = file ? file->f_flags : 0;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = get_node_id(inode);
	req->in.argpages = 1;
	req->in.numargs = 2;
	if (fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      struct inode *inode, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	fuse_write_fill(req, file, file->private_data, inode, pos, count, 0);
	if (owner != NULL) {
		struct fuse_write_in *inarg = &req->misc.write.in;
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	request_send(fc, req);
	return req->misc.write.out.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = __grab_cache_page(mapping, index);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}

static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

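/*
 * Send a single synchronous WRITE request for data that was copied
 * into the page between write_begin() and write_end(), updating
 * i_size if the file grew.
 */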
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, inode, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

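/*
 * Copy data from the iov_iter into newly grabbed page-cache pages,
 * adding them to the request until the request is full, the iov is
 * exhausted, or a page boundary is reached with big_writes disabled.
 */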
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = __grab_cache_page(mapping, index);
		if (!page)
			break;

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}

static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

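/*
 * Write path used instead of generic_file_aio_write(): perform the
 * usual VFS checks, then copy and send the data in large chunks via
 * fuse_perform_write().
 */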
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       unsigned nbytes, int write)
{
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* This doesn't work with nfsd */
	if (!current->mm)
		return -EPERM;

	nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;
	return 0;
}

static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos, int write)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		size_t nbytes_limit = min(count, nmax);
		size_t nbytes;
		int err = fuse_get_user_pages(req, buf, nbytes_limit, !write);
		if (err) {
			res = err;
			break;
		}
		nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
		nbytes = min(nbytes_limit, nbytes);
		if (write)
			nres = fuse_send_write(req, file, inode, pos, nbytes,
					       current->files);
		else
			nres = fuse_send_read(req, file, inode, pos, nbytes,
					      current->files);
		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	fuse_put_request(fc, req);
	if (res > 0) {
		if (write)
			fuse_write_update_size(inode, pos);
		*ppos = pos;
	}
	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;
	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res)
		res = fuse_direct_io(file, buf, count, ppos, 1);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff);
	fuse_put_request(fc, req);
}

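/*
 * Drop the writeback accounting for a finished writepage request and
 * wake up any waiters in fuse_wait_on_page_writeback().
 *
 * Called under fc->lock.
 */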
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock held
 */
void fuse_flush_writepages(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

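/*
 * Start writeback of a page: the contents are copied to a temporary
 * page (accounted as NR_WRITEBACK_TEMP), writeback on the original
 * page is ended right away, and the WRITE request for the copy is
 * queued to be sent to userspace.
 */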
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);

	copy_highpage(tmp_page, page);
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated operations:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file_wait(file, fl);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

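/*
 * Map a file block to a device block with FUSE_BMAP.  Only meaningful
 * for block-device based filesystems; returns 0 if the filesystem
 * doesn't support or implement bmap.
 */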
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	/* no mmap and splice_read */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}