/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc();
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}
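
/*
 * Lifetime of a fuse_file: ff->count holds one reference for the open
 * struct file plus one for each background request that still needs
 * the file (async readpages and writepage requests take their own
 * reference through fuse_file_get()).  ff->reserved_req is allocated
 * at open time so that the final fuse_file_put() can always turn it
 * into the RELEASE request without having to allocate memory then.
 */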

static void fuse_release_async(struct work_struct *work)
{
	struct fuse_req *req;
	struct fuse_conn *fc;
	struct path path;

	req = container_of(work, struct fuse_req, misc.release.work);
	path = req->misc.release.path;
	fc = get_fuse_conn(path.dentry->d_inode);

	fuse_put_request(fc, req);
	path_put(&path);
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	if (fc->destroy_req) {
		/*
		 * If this is a fuseblk mount, then it's possible that
		 * releasing the path will result in releasing the
		 * super block and sending the DESTROY request.  If
		 * the server is single threaded, this would hang.
		 * For this reason do the path_put() in a separate
		 * thread.
		 */
		atomic_inc(&req->count);
		INIT_WORK(&req->misc.release.work, fuse_release_async);
		schedule_work(&req->misc.release.work);
	} else {
		path_put(&req->misc.release.path);
	}
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (sync) {
			fuse_request_send(ff->fc, req);
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}

int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
	if (err) {
		fuse_file_free(ff);
		return err;
	}

	if (isdir)
		outarg.open_flags &= ~FOPEN_DIRECT_IO;

	ff->fh = outarg.fh;
	ff->nodeid = nodeid;
	ff->open_flags = outarg.open_flags;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_sync(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount;
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, int datasync)
{
	return fuse_fsync_common(file, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count) {
			loff_t pos;

			pos = page_offset(req->pages[0]) + num_read;
			fuse_read_update_size(inode, pos,
					      req->misc.read.attr_ver);
		}
		fuse_invalidate_attr(inode); /* atime changed */
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}

void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
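
/*
 * Buffered writes go out one request at a time: fuse_fill_write_pages()
 * packs at most FUSE_MAX_PAGES_PER_REQ pages and fc->max_write bytes
 * into a request, and unless the server negotiated big_writes it stops
 * after a single page.  fuse_perform_write() below loops over such
 * requests and treats a short reply from the server as -EIO.
 */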

static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       size_t *nbytesp, int write)
{
	size_t nbytes = *nbytesp;
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		return 0;
	}

	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	npages = get_user_pages_fast(user_addr, npages, !write, req->pages);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
	*nbytesp = min(*nbytesp, nbytes);

	return 0;
}

ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res) {
		res = fuse_direct_io(file, buf, count, ppos, 1);
		if (res > 0)
			fuse_write_update_size(inode, *ppos);
	}
	mutex_unlock(&inode->i_mutex);

	fuse_invalidate_attr(inode);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff, false);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}
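
/*
 * Summary of the writepage scheme: fuse_writepage_locked() copies the
 * dirty page into a temporary page (GFP_NOFS), ends writeback on the
 * original page immediately and queues the request on
 * fi->queued_writes.  fi->writectr counts in-flight writepage
 * requests; fuse_set_nowrite() drives it negative to block further
 * sends (see fuse_sync_writes() above), and fuse_flush_writepages()
 * only submits queued requests while writectr >= 0.
 */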

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated operations:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
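
/*
 * fuse_lk_fill() is shared by the GETLK and SETLK/SETLKW paths below.
 * The owner sent to the server is the scrambled fuse_lock_owner_id()
 * value, and flock(2) requests are emulated with POSIX locks
 * (FUSE_LK_FLOCK set, owner forced to the struct file) when the
 * server implements locking; see fuse_file_flock().
 */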

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left))
				return -EFAULT;

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}

/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
				     size_t transferred, unsigned count,
				     bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}

/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
	size_t n;
	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;

	for (n = 0; n < count; n++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}

static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}


/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the second
 * invocation, it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov
 * array to
 *
 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,     .iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT)
		inarg.flags |= FUSE_IOCTL_32BIT;
#endif

	/* assume all the iovs returned by client always fit in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are within bounds; the separate
		 * checks protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(pages[0], KM_USER0);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr, KM_USER0);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	free_page((unsigned long) iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);

static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
				   unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	/* no splice_read */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}