/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc();
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_async(struct work_struct *work)
{
	struct fuse_req *req;
	struct fuse_conn *fc;
	struct path path;

	req = container_of(work, struct fuse_req, misc.release.work);
	path = req->misc.release.path;
	fc = get_fuse_conn(path.dentry->d_inode);

	fuse_put_request(fc, req);
	path_put(&path);
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	if (fc->destroy_req) {
		/*
		 * If this is a fuseblk mount, then it's possible that
		 * releasing the path will result in releasing the
		 * super block and sending the DESTROY request.  If
		 * the server is single threaded, this would hang.
		 * For this reason do the path_put() in a separate
		 * thread.
		 */
		atomic_inc(&req->count);
		INIT_WORK(&req->misc.release.work, fuse_release_async);
		schedule_work(&req->misc.release.work);
	} else {
		path_put(&req->misc.release.path);
	}
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (sync) {
			fuse_request_send(ff->fc, req);
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}

int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
	if (err) {
		fuse_file_free(ff);
		return err;
	}

	if (isdir)
		outarg.open_flags &= ~FOPEN_DIRECT_IO;

	ff->fh = outarg.fh;
	ff->nodeid = nodeid;
	ff->open_flags = outarg.open_flags;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
}
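
/*
 * The open_flags consumed above are chosen by the userspace server in
 * its reply to FUSE_OPEN.  Minimal sketch of the server side (the
 * reply helper name and "my_handle" are hypothetical; only struct
 * fuse_open_out and the FOPEN_* bits are from the FUSE ABI):
 *
 *	struct fuse_open_out outarg = {
 *		.fh = my_handle,	// server-chosen opaque cookie
 *		.open_flags = FOPEN_DIRECT_IO | FOPEN_NONSEEKABLE,
 *	};
 *	send_reply(unique, &outarg, sizeof(outarg));
 *
 * Setting FOPEN_KEEP_CACHE instead would preserve cached pages across
 * opens, skipping the invalidate_inode_pages2() call above.
 */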
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
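
/*
 * The loop above is one standard XTEA block encipherment: 32 rounds,
 * the usual key-schedule constant 0x9E3779B9 (2^32 divided by the
 * golden ratio), the 64-bit owner pointer as the plaintext block and
 * fc->scramble_key as the 128-bit key.  Illustrative restatement of
 * one half-round (v0half/v1half are local names, not from this file):
 *
 *	v0half += ((v1half << 4 ^ v1half >> 5) + v1half)
 *			^ (sum + key[sum & 3]);
 *
 * Because the key is generated per connection, the same lock owner
 * maps to the same opaque id for the connection's lifetime, while the
 * kernel pointer value itself is never revealed to the server.
 */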
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	mutex_lock(&inode->i_mutex);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
 out:
	mutex_unlock(&inode->i_mutex);
	return err;
}
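
/*
 * Bit 0 of fsync_flags above distinguishes fdatasync() from fsync().
 * A server that backs each file with a local fd would typically handle
 * it along these lines (sketch only; the handler shape and backing_fd
 * are made up, the flag semantics follow from the code above):
 *
 *	if (inarg->fsync_flags & 1)	// datasync: data only
 *		err = fdatasync(backing_fd);
 *	else
 *		err = fsync(backing_fd);
 */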
static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}
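
/*
 * Worked example of the short-read logic above: a 4096-byte FUSE_READ
 * at pos 8192 that comes back with num_read == 1000 implies EOF at
 * offset 9192.  If the cached i_size is still larger and no other
 * attribute update raced with the read (the attr_ver check), i_size
 * is clamped down to 9192 by fuse_read_update_size().
 */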
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count) {
			loff_t pos;

			pos = page_offset(req->pages[0]) + num_read;
			fuse_read_update_size(inode, pos,
					      req->misc.read.attr_ver);
		}
		fuse_invalidate_attr(inode); /* atime changed */
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
 out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		mark_page_accessed(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
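
/*
 * Sizing example for the loop above (illustrative numbers only): with
 * 4 KiB pages, FUSE_MAX_PAGES_PER_REQ == 32 and fc->max_write == 32768,
 * one request batches at most 8 full pages (max_write is the binding
 * limit here), so a 100 KiB buffered write becomes four FUSE_WRITE
 * requests.  Without the big_writes negotiation the loop exits after a
 * single page, i.e. at most 4 KiB per request.
 */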
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	size_t ocount = 0;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;
	loff_t endbyte = 0;

	WARN_ON(iocb->ki_pos != pos);

	ocount = 0;
	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	if (file->f_flags & O_DIRECT) {
		written = generic_file_direct_write(iocb, iov, &nr_segs,
						    pos, &iocb->ki_pos,
						    count, ocount);
		if (written < 0 || written == count)
			goto out;

		pos += written;
		count -= written;

		iov_iter_init(&i, iov, nr_segs, count, written);
		written_buffered = fuse_perform_write(file, mapping, &i, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_CACHE_SHIFT,
					 endbyte >> PAGE_CACHE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		iov_iter_init(&i, iov, nr_segs, count, 0);
		written = fuse_perform_write(file, mapping, &i, pos);
		if (written >= 0)
			iocb->ki_pos = pos + written;
	}
 out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       size_t *nbytesp, int write)
{
	size_t nbytes = *nbytesp;
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		return 0;
	}

	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	npages = get_user_pages_fast(user_addr, npages, !write, req->pages);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
	*nbytesp = min(*nbytesp, nbytes);

	return 0;
}

ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
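
/*
 * Splitting example for fuse_direct_io() (illustrative numbers): a
 * 1 MiB read with fc->max_read == 131072 is pinned and sent as eight
 * consecutive 128 KiB FUSE_READ requests, advancing pos/buf after each
 * reply.  A short reply ends the loop early and the byte count
 * transferred so far is returned; an error reply is returned only if
 * nothing has been transferred yet.
 */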
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t __fuse_direct_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	res = generic_write_checks(file, ppos, &count, 0);
	if (!res) {
		res = fuse_direct_io(file, buf, count, ppos, 1);
		if (res > 0)
			fuse_write_update_size(inode, *ppos);
	}

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = __fuse_direct_write(file, buf, count, ppos);
	mutex_unlock(&inode->i_mutex);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff, false);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is non-negative (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock held
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}
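
/*
 * State sketch for fi->writectr, based on how it is used in this file
 * (the FUSE_NOWRITE helpers themselves live elsewhere in fuse):
 *
 *	writectr >= 0:	counts writepage requests in flight; queued
 *			requests may be sent immediately
 *	writectr < 0:	FUSE_NOWRITE is in effect (truncate/fsync);
 *			requests accumulate on fi->queued_writes until
 *			fuse_release_nowrite() lets them flush
 */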
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

 err_free:
	fuse_request_free(req);
 err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
	if (origin == SEEK_CUR || origin == SEEK_SET)
		return generic_file_llseek(file, offset, origin);

	mutex_lock(&inode->i_mutex);
	retval = fuse_update_attributes(inode, NULL, file, NULL);
	if (!retval)
		retval = generic_file_llseek(file, offset, origin);
	mutex_unlock(&inode->i_mutex);

	return retval;
}

static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left))
				return -EFAULT;

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}

/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
				     size_t transferred, unsigned count,
				     bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}
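
/*
 * Size-based disambiguation example: for count == 2 iovecs the reply
 * is 16 bytes if the server used struct compat_iovec (two 32-bit
 * fields per entry) and 32 bytes if it used the native 64-bit
 * struct iovec (two 64-bit fields per entry), so `transferred` alone
 * identifies the ABI.  The two sizes can never coincide for any
 * count > 0, since the structures differ in size.
 */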
/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
	size_t n;
	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;

	for (n = 0; n < count; n++, iov++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}

static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}


/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT)
		inarg.flags |= FUSE_IOCTL_32BIT;
#endif

	/* assume all the iovs returned by the client always fit in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are in boundary, separate checks
		 * are to protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(pages[0]);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	free_page((unsigned long) iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
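
/*
 * Server-side sketch of the retry reply described above, for a
 * connection with minor >= 16 (struct fuse_ioctl_out and struct
 * fuse_ioctl_iovec are from the FUSE ABI; the reply helper and
 * "struct a" are hypothetical, carried over from the comment above):
 *
 *	struct fuse_ioctl_out out = {
 *		.flags	 = FUSE_IOCTL_RETRY,
 *		.in_iovs = 2,
 *	};
 *	struct fuse_ioctl_iovec iov[2] = {
 *		{ .base = inarg.arg,	     .len = sizeof(struct a) },
 *		{ .base = (uintptr_t) a.buf, .len = a.buflen	     },
 *	};
 *	send_reply(unique, &out, sizeof(out), iov, sizeof(iov));
 */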
long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
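
/*
 * The wakeup above is triggered by the server writing an unsolicited
 * message to /dev/fuse.  Sketch of the server side, assuming the
 * notification convention that unique == 0 and the error field of the
 * header carries the notify code (the structs and writev() are
 * standard; fuse_dev_fd/saved_kh are illustrative names):
 *
 *	struct fuse_out_header oh = {
 *		.len	= sizeof(oh) +
 *			  sizeof(struct fuse_notify_poll_wakeup_out),
 *		.error	= FUSE_NOTIFY_POLL,
 *		.unique	= 0,
 *	};
 *	struct fuse_notify_poll_wakeup_out pw = { .kh = saved_kh };
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) }, { &pw, sizeof(pw) },
 *	};
 *	writev(fuse_dev_fd, iov, 2);
 */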
static ssize_t fuse_loop_dio(struct file *filp, const struct iovec *iov,
			     unsigned long nr_segs, loff_t *ppos, int rw)
{
	const struct iovec *vector = iov;
	ssize_t ret = 0;

	while (nr_segs > 0) {
		void __user *base;
		size_t len;
		ssize_t nr;

		base = vector->iov_base;
		len = vector->iov_len;
		vector++;
		nr_segs--;

		if (rw == WRITE)
			nr = __fuse_direct_write(filp, base, len, ppos);
		else
			nr = fuse_direct_read(filp, base, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != len)
			break;
	}

	return ret;
}


static ssize_t
fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	       loff_t offset, unsigned long nr_segs)
{
	ssize_t ret = 0;
	struct file *file = NULL;
	loff_t pos = 0;

	file = iocb->ki_filp;
	pos = offset;

	ret = fuse_loop_dio(file, iov, nr_segs, &pos, rw);

	return ret;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	/* no splice_read */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}