1 /* 2 FUSE: Filesystem in Userspace 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> 4 5 This program can be distributed under the terms of the GNU GPL. 6 See the file COPYING. 7 */ 8 9 #include "fuse_i.h" 10 11 #include <linux/pagemap.h> 12 #include <linux/slab.h> 13 #include <linux/kernel.h> 14 #include <linux/sched.h> 15 #include <linux/module.h> 16 #include <linux/compat.h> 17 #include <linux/swap.h> 18 #include <linux/aio.h> 19 #include <linux/falloc.h> 20 21 static const struct file_operations fuse_direct_io_file_operations; 22 23 static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file, 24 int opcode, struct fuse_open_out *outargp) 25 { 26 struct fuse_open_in inarg; 27 struct fuse_req *req; 28 int err; 29 30 req = fuse_get_req_nopages(fc); 31 if (IS_ERR(req)) 32 return PTR_ERR(req); 33 34 memset(&inarg, 0, sizeof(inarg)); 35 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); 36 if (!fc->atomic_o_trunc) 37 inarg.flags &= ~O_TRUNC; 38 req->in.h.opcode = opcode; 39 req->in.h.nodeid = nodeid; 40 req->in.numargs = 1; 41 req->in.args[0].size = sizeof(inarg); 42 req->in.args[0].value = &inarg; 43 req->out.numargs = 1; 44 req->out.args[0].size = sizeof(*outargp); 45 req->out.args[0].value = outargp; 46 fuse_request_send(fc, req); 47 err = req->out.h.error; 48 fuse_put_request(fc, req); 49 50 return err; 51 } 52 53 struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) 54 { 55 struct fuse_file *ff; 56 57 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); 58 if (unlikely(!ff)) 59 return NULL; 60 61 ff->fc = fc; 62 ff->reserved_req = fuse_request_alloc(0); 63 if (unlikely(!ff->reserved_req)) { 64 kfree(ff); 65 return NULL; 66 } 67 68 INIT_LIST_HEAD(&ff->write_entry); 69 atomic_set(&ff->count, 0); 70 RB_CLEAR_NODE(&ff->polled_node); 71 init_waitqueue_head(&ff->poll_wait); 72 73 spin_lock(&fc->lock); 74 ff->kh = ++fc->khctr; 75 spin_unlock(&fc->lock); 76 77 return ff; 78 } 79 80 void fuse_file_free(struct fuse_file *ff) 81 { 82 fuse_request_free(ff->reserved_req); 83 kfree(ff); 84 } 85 86 struct fuse_file *fuse_file_get(struct fuse_file *ff) 87 { 88 atomic_inc(&ff->count); 89 return ff; 90 } 91 92 static void fuse_release_async(struct work_struct *work) 93 { 94 struct fuse_req *req; 95 struct fuse_conn *fc; 96 struct path path; 97 98 req = container_of(work, struct fuse_req, misc.release.work); 99 path = req->misc.release.path; 100 fc = get_fuse_conn(path.dentry->d_inode); 101 102 fuse_put_request(fc, req); 103 path_put(&path); 104 } 105 106 static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 107 { 108 if (fc->destroy_req) { 109 /* 110 * If this is a fuseblk mount, then it's possible that 111 * releasing the path will result in releasing the 112 * super block and sending the DESTROY request. If 113 * the server is single threaded, this would hang. 114 * For this reason do the path_put() in a separate 115 * thread. 116 */ 117 atomic_inc(&req->count); 118 INIT_WORK(&req->misc.release.work, fuse_release_async); 119 schedule_work(&req->misc.release.work); 120 } else { 121 path_put(&req->misc.release.path); 122 } 123 } 124 125 static void fuse_file_put(struct fuse_file *ff, bool sync) 126 { 127 if (atomic_dec_and_test(&ff->count)) { 128 struct fuse_req *req = ff->reserved_req; 129 130 if (sync) { 131 req->background = 0; 132 fuse_request_send(ff->fc, req); 133 path_put(&req->misc.release.path); 134 fuse_put_request(ff->fc, req); 135 } else { 136 req->end = fuse_release_end; 137 req->background = 1; 138 fuse_request_send_background(ff->fc, req); 139 } 140 kfree(ff); 141 } 142 } 143 144 int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, 145 bool isdir) 146 { 147 struct fuse_open_out outarg; 148 struct fuse_file *ff; 149 int err; 150 int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; 151 152 ff = fuse_file_alloc(fc); 153 if (!ff) 154 return -ENOMEM; 155 156 err = fuse_send_open(fc, nodeid, file, opcode, &outarg); 157 if (err) { 158 fuse_file_free(ff); 159 return err; 160 } 161 162 if (isdir) 163 outarg.open_flags &= ~FOPEN_DIRECT_IO; 164 165 ff->fh = outarg.fh; 166 ff->nodeid = nodeid; 167 ff->open_flags = outarg.open_flags; 168 file->private_data = fuse_file_get(ff); 169 170 return 0; 171 } 172 EXPORT_SYMBOL_GPL(fuse_do_open); 173 174 void fuse_finish_open(struct inode *inode, struct file *file) 175 { 176 struct fuse_file *ff = file->private_data; 177 struct fuse_conn *fc = get_fuse_conn(inode); 178 179 if (ff->open_flags & FOPEN_DIRECT_IO) 180 file->f_op = &fuse_direct_io_file_operations; 181 if (!(ff->open_flags & FOPEN_KEEP_CACHE)) 182 invalidate_inode_pages2(inode->i_mapping); 183 if (ff->open_flags & FOPEN_NONSEEKABLE) 184 nonseekable_open(inode, file); 185 if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { 186 struct fuse_inode *fi = get_fuse_inode(inode); 187 188 spin_lock(&fc->lock); 189 fi->attr_version = ++fc->attr_version; 190 i_size_write(inode, 0); 191 spin_unlock(&fc->lock); 192 fuse_invalidate_attr(inode); 193 } 194 } 195 196 int fuse_open_common(struct inode *inode, struct file *file, bool isdir) 197 { 198 struct fuse_conn *fc = get_fuse_conn(inode); 199 int err; 200 201 err = generic_file_open(inode, file); 202 if (err) 203 return err; 204 205 err = fuse_do_open(fc, get_node_id(inode), file, isdir); 206 if (err) 207 return err; 208 209 fuse_finish_open(inode, file); 210 211 return 0; 212 } 213 214 static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) 215 { 216 struct fuse_conn *fc = ff->fc; 217 struct fuse_req *req = ff->reserved_req; 218 struct fuse_release_in *inarg = &req->misc.release.in; 219 220 spin_lock(&fc->lock); 221 list_del(&ff->write_entry); 222 if (!RB_EMPTY_NODE(&ff->polled_node)) 223 rb_erase(&ff->polled_node, &fc->polled_files); 224 spin_unlock(&fc->lock); 225 226 wake_up_interruptible_all(&ff->poll_wait); 227 228 inarg->fh = ff->fh; 229 inarg->flags = flags; 230 req->in.h.opcode = opcode; 231 req->in.h.nodeid = ff->nodeid; 232 req->in.numargs = 1; 233 req->in.args[0].size = sizeof(struct fuse_release_in); 234 req->in.args[0].value = inarg; 235 } 236 237 void fuse_release_common(struct file *file, int opcode) 238 { 239 struct fuse_file *ff; 240 struct fuse_req *req; 241 242 ff = file->private_data; 243 if (unlikely(!ff)) 244 return; 245 246 req = ff->reserved_req; 247 fuse_prepare_release(ff, file->f_flags, opcode); 248 249 if (ff->flock) { 250 struct fuse_release_in *inarg = &req->misc.release.in; 251 inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; 252 inarg->lock_owner = fuse_lock_owner_id(ff->fc, 253 (fl_owner_t) file); 254 } 255 /* Hold vfsmount and dentry until release is finished */ 256 path_get(&file->f_path); 257 req->misc.release.path = file->f_path; 258 259 /* 260 * Normally this will send the RELEASE request, however if 261 * some asynchronous READ or WRITE requests are outstanding, 262 * the sending will be delayed. 263 * 264 * Make the release synchronous if this is a fuseblk mount, 265 * synchronous RELEASE is allowed (and desirable) in this case 266 * because the server can be trusted not to screw up. 267 */ 268 fuse_file_put(ff, ff->fc->destroy_req != NULL); 269 } 270 271 static int fuse_open(struct inode *inode, struct file *file) 272 { 273 return fuse_open_common(inode, file, false); 274 } 275 276 static int fuse_release(struct inode *inode, struct file *file) 277 { 278 fuse_release_common(file, FUSE_RELEASE); 279 280 /* return value is ignored by VFS */ 281 return 0; 282 } 283 284 void fuse_sync_release(struct fuse_file *ff, int flags) 285 { 286 WARN_ON(atomic_read(&ff->count) > 1); 287 fuse_prepare_release(ff, flags, FUSE_RELEASE); 288 ff->reserved_req->force = 1; 289 ff->reserved_req->background = 0; 290 fuse_request_send(ff->fc, ff->reserved_req); 291 fuse_put_request(ff->fc, ff->reserved_req); 292 kfree(ff); 293 } 294 EXPORT_SYMBOL_GPL(fuse_sync_release); 295 296 /* 297 * Scramble the ID space with XTEA, so that the value of the files_struct 298 * pointer is not exposed to userspace. 299 */ 300 u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id) 301 { 302 u32 *k = fc->scramble_key; 303 u64 v = (unsigned long) id; 304 u32 v0 = v; 305 u32 v1 = v >> 32; 306 u32 sum = 0; 307 int i; 308 309 for (i = 0; i < 32; i++) { 310 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]); 311 sum += 0x9E3779B9; 312 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]); 313 } 314 315 return (u64) v0 + ((u64) v1 << 32); 316 } 317 318 /* 319 * Check if page is under writeback 320 * 321 * This is currently done by walking the list of writepage requests 322 * for the inode, which can be pretty inefficient. 323 */ 324 static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index) 325 { 326 struct fuse_conn *fc = get_fuse_conn(inode); 327 struct fuse_inode *fi = get_fuse_inode(inode); 328 struct fuse_req *req; 329 bool found = false; 330 331 spin_lock(&fc->lock); 332 list_for_each_entry(req, &fi->writepages, writepages_entry) { 333 pgoff_t curr_index; 334 335 BUG_ON(req->inode != inode); 336 curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; 337 if (curr_index == index) { 338 found = true; 339 break; 340 } 341 } 342 spin_unlock(&fc->lock); 343 344 return found; 345 } 346 347 /* 348 * Wait for page writeback to be completed. 349 * 350 * Since fuse doesn't rely on the VM writeback tracking, this has to 351 * use some other means. 352 */ 353 static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) 354 { 355 struct fuse_inode *fi = get_fuse_inode(inode); 356 357 wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); 358 return 0; 359 } 360 361 static int fuse_flush(struct file *file, fl_owner_t id) 362 { 363 struct inode *inode = file_inode(file); 364 struct fuse_conn *fc = get_fuse_conn(inode); 365 struct fuse_file *ff = file->private_data; 366 struct fuse_req *req; 367 struct fuse_flush_in inarg; 368 int err; 369 370 if (is_bad_inode(inode)) 371 return -EIO; 372 373 if (fc->no_flush) 374 return 0; 375 376 req = fuse_get_req_nofail_nopages(fc, file); 377 memset(&inarg, 0, sizeof(inarg)); 378 inarg.fh = ff->fh; 379 inarg.lock_owner = fuse_lock_owner_id(fc, id); 380 req->in.h.opcode = FUSE_FLUSH; 381 req->in.h.nodeid = get_node_id(inode); 382 req->in.numargs = 1; 383 req->in.args[0].size = sizeof(inarg); 384 req->in.args[0].value = &inarg; 385 req->force = 1; 386 fuse_request_send(fc, req); 387 err = req->out.h.error; 388 fuse_put_request(fc, req); 389 if (err == -ENOSYS) { 390 fc->no_flush = 1; 391 err = 0; 392 } 393 return err; 394 } 395 396 /* 397 * Wait for all pending writepages on the inode to finish. 398 * 399 * This is currently done by blocking further writes with FUSE_NOWRITE 400 * and waiting for all sent writes to complete. 401 * 402 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage 403 * could conflict with truncation. 404 */ 405 static void fuse_sync_writes(struct inode *inode) 406 { 407 fuse_set_nowrite(inode); 408 fuse_release_nowrite(inode); 409 } 410 411 int fuse_fsync_common(struct file *file, loff_t start, loff_t end, 412 int datasync, int isdir) 413 { 414 struct inode *inode = file->f_mapping->host; 415 struct fuse_conn *fc = get_fuse_conn(inode); 416 struct fuse_file *ff = file->private_data; 417 struct fuse_req *req; 418 struct fuse_fsync_in inarg; 419 int err; 420 421 if (is_bad_inode(inode)) 422 return -EIO; 423 424 err = filemap_write_and_wait_range(inode->i_mapping, start, end); 425 if (err) 426 return err; 427 428 if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) 429 return 0; 430 431 mutex_lock(&inode->i_mutex); 432 433 /* 434 * Start writeback against all dirty pages of the inode, then 435 * wait for all outstanding writes, before sending the FSYNC 436 * request. 437 */ 438 err = write_inode_now(inode, 0); 439 if (err) 440 goto out; 441 442 fuse_sync_writes(inode); 443 444 req = fuse_get_req_nopages(fc); 445 if (IS_ERR(req)) { 446 err = PTR_ERR(req); 447 goto out; 448 } 449 450 memset(&inarg, 0, sizeof(inarg)); 451 inarg.fh = ff->fh; 452 inarg.fsync_flags = datasync ? 1 : 0; 453 req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC; 454 req->in.h.nodeid = get_node_id(inode); 455 req->in.numargs = 1; 456 req->in.args[0].size = sizeof(inarg); 457 req->in.args[0].value = &inarg; 458 fuse_request_send(fc, req); 459 err = req->out.h.error; 460 fuse_put_request(fc, req); 461 if (err == -ENOSYS) { 462 if (isdir) 463 fc->no_fsyncdir = 1; 464 else 465 fc->no_fsync = 1; 466 err = 0; 467 } 468 out: 469 mutex_unlock(&inode->i_mutex); 470 return err; 471 } 472 473 static int fuse_fsync(struct file *file, loff_t start, loff_t end, 474 int datasync) 475 { 476 return fuse_fsync_common(file, start, end, datasync, 0); 477 } 478 479 void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, 480 size_t count, int opcode) 481 { 482 struct fuse_read_in *inarg = &req->misc.read.in; 483 struct fuse_file *ff = file->private_data; 484 485 inarg->fh = ff->fh; 486 inarg->offset = pos; 487 inarg->size = count; 488 inarg->flags = file->f_flags; 489 req->in.h.opcode = opcode; 490 req->in.h.nodeid = ff->nodeid; 491 req->in.numargs = 1; 492 req->in.args[0].size = sizeof(struct fuse_read_in); 493 req->in.args[0].value = inarg; 494 req->out.argvar = 1; 495 req->out.numargs = 1; 496 req->out.args[0].size = count; 497 } 498 499 static void fuse_release_user_pages(struct fuse_req *req, int write) 500 { 501 unsigned i; 502 503 for (i = 0; i < req->num_pages; i++) { 504 struct page *page = req->pages[i]; 505 if (write) 506 set_page_dirty_lock(page); 507 put_page(page); 508 } 509 } 510 511 /** 512 * In case of short read, the caller sets 'pos' to the position of 513 * actual end of fuse request in IO request. Otherwise, if bytes_requested 514 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1. 515 * 516 * An example: 517 * User requested DIO read of 64K. It was splitted into two 32K fuse requests, 518 * both submitted asynchronously. The first of them was ACKed by userspace as 519 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The 520 * second request was ACKed as short, e.g. only 1K was read, resulting in 521 * pos == 33K. 522 * 523 * Thus, when all fuse requests are completed, the minimal non-negative 'pos' 524 * will be equal to the length of the longest contiguous fragment of 525 * transferred data starting from the beginning of IO request. 526 */ 527 static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) 528 { 529 int left; 530 531 spin_lock(&io->lock); 532 if (err) 533 io->err = io->err ? : err; 534 else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes)) 535 io->bytes = pos; 536 537 left = --io->reqs; 538 spin_unlock(&io->lock); 539 540 if (!left) { 541 long res; 542 543 if (io->err) 544 res = io->err; 545 else if (io->bytes >= 0 && io->write) 546 res = -EIO; 547 else { 548 res = io->bytes < 0 ? io->size : io->bytes; 549 550 if (!is_sync_kiocb(io->iocb)) { 551 struct inode *inode = file_inode(io->iocb->ki_filp); 552 struct fuse_conn *fc = get_fuse_conn(inode); 553 struct fuse_inode *fi = get_fuse_inode(inode); 554 555 spin_lock(&fc->lock); 556 fi->attr_version = ++fc->attr_version; 557 spin_unlock(&fc->lock); 558 } 559 } 560 561 aio_complete(io->iocb, res, 0); 562 kfree(io); 563 } 564 } 565 566 static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req) 567 { 568 struct fuse_io_priv *io = req->io; 569 ssize_t pos = -1; 570 571 fuse_release_user_pages(req, !io->write); 572 573 if (io->write) { 574 if (req->misc.write.in.size != req->misc.write.out.size) 575 pos = req->misc.write.in.offset - io->offset + 576 req->misc.write.out.size; 577 } else { 578 if (req->misc.read.in.size != req->out.args[0].size) 579 pos = req->misc.read.in.offset - io->offset + 580 req->out.args[0].size; 581 } 582 583 fuse_aio_complete(io, req->out.h.error, pos); 584 } 585 586 static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req, 587 size_t num_bytes, struct fuse_io_priv *io) 588 { 589 spin_lock(&io->lock); 590 io->size += num_bytes; 591 io->reqs++; 592 spin_unlock(&io->lock); 593 594 req->io = io; 595 req->end = fuse_aio_complete_req; 596 597 __fuse_get_request(req); 598 fuse_request_send_background(fc, req); 599 600 return num_bytes; 601 } 602 603 static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io, 604 loff_t pos, size_t count, fl_owner_t owner) 605 { 606 struct file *file = io->file; 607 struct fuse_file *ff = file->private_data; 608 struct fuse_conn *fc = ff->fc; 609 610 fuse_read_fill(req, file, pos, count, FUSE_READ); 611 if (owner != NULL) { 612 struct fuse_read_in *inarg = &req->misc.read.in; 613 614 inarg->read_flags |= FUSE_READ_LOCKOWNER; 615 inarg->lock_owner = fuse_lock_owner_id(fc, owner); 616 } 617 618 if (io->async) 619 return fuse_async_req_send(fc, req, count, io); 620 621 fuse_request_send(fc, req); 622 return req->out.args[0].size; 623 } 624 625 static void fuse_read_update_size(struct inode *inode, loff_t size, 626 u64 attr_ver) 627 { 628 struct fuse_conn *fc = get_fuse_conn(inode); 629 struct fuse_inode *fi = get_fuse_inode(inode); 630 631 spin_lock(&fc->lock); 632 if (attr_ver == fi->attr_version && size < inode->i_size) { 633 fi->attr_version = ++fc->attr_version; 634 i_size_write(inode, size); 635 } 636 spin_unlock(&fc->lock); 637 } 638 639 static int fuse_readpage(struct file *file, struct page *page) 640 { 641 struct fuse_io_priv io = { .async = 0, .file = file }; 642 struct inode *inode = page->mapping->host; 643 struct fuse_conn *fc = get_fuse_conn(inode); 644 struct fuse_req *req; 645 size_t num_read; 646 loff_t pos = page_offset(page); 647 size_t count = PAGE_CACHE_SIZE; 648 u64 attr_ver; 649 int err; 650 651 err = -EIO; 652 if (is_bad_inode(inode)) 653 goto out; 654 655 /* 656 * Page writeback can extend beyond the lifetime of the 657 * page-cache page, so make sure we read a properly synced 658 * page. 659 */ 660 fuse_wait_on_page_writeback(inode, page->index); 661 662 req = fuse_get_req(fc, 1); 663 err = PTR_ERR(req); 664 if (IS_ERR(req)) 665 goto out; 666 667 attr_ver = fuse_get_attr_version(fc); 668 669 req->out.page_zeroing = 1; 670 req->out.argpages = 1; 671 req->num_pages = 1; 672 req->pages[0] = page; 673 req->page_descs[0].length = count; 674 num_read = fuse_send_read(req, &io, pos, count, NULL); 675 err = req->out.h.error; 676 fuse_put_request(fc, req); 677 678 if (!err) { 679 /* 680 * Short read means EOF. If file size is larger, truncate it 681 */ 682 if (num_read < count) 683 fuse_read_update_size(inode, pos + num_read, attr_ver); 684 685 SetPageUptodate(page); 686 } 687 688 fuse_invalidate_attr(inode); /* atime changed */ 689 out: 690 unlock_page(page); 691 return err; 692 } 693 694 static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) 695 { 696 int i; 697 size_t count = req->misc.read.in.size; 698 size_t num_read = req->out.args[0].size; 699 struct address_space *mapping = NULL; 700 701 for (i = 0; mapping == NULL && i < req->num_pages; i++) 702 mapping = req->pages[i]->mapping; 703 704 if (mapping) { 705 struct inode *inode = mapping->host; 706 707 /* 708 * Short read means EOF. If file size is larger, truncate it 709 */ 710 if (!req->out.h.error && num_read < count) { 711 loff_t pos; 712 713 pos = page_offset(req->pages[0]) + num_read; 714 fuse_read_update_size(inode, pos, 715 req->misc.read.attr_ver); 716 } 717 fuse_invalidate_attr(inode); /* atime changed */ 718 } 719 720 for (i = 0; i < req->num_pages; i++) { 721 struct page *page = req->pages[i]; 722 if (!req->out.h.error) 723 SetPageUptodate(page); 724 else 725 SetPageError(page); 726 unlock_page(page); 727 page_cache_release(page); 728 } 729 if (req->ff) 730 fuse_file_put(req->ff, false); 731 } 732 733 static void fuse_send_readpages(struct fuse_req *req, struct file *file) 734 { 735 struct fuse_file *ff = file->private_data; 736 struct fuse_conn *fc = ff->fc; 737 loff_t pos = page_offset(req->pages[0]); 738 size_t count = req->num_pages << PAGE_CACHE_SHIFT; 739 740 req->out.argpages = 1; 741 req->out.page_zeroing = 1; 742 req->out.page_replace = 1; 743 fuse_read_fill(req, file, pos, count, FUSE_READ); 744 req->misc.read.attr_ver = fuse_get_attr_version(fc); 745 if (fc->async_read) { 746 req->ff = fuse_file_get(ff); 747 req->end = fuse_readpages_end; 748 fuse_request_send_background(fc, req); 749 } else { 750 fuse_request_send(fc, req); 751 fuse_readpages_end(fc, req); 752 fuse_put_request(fc, req); 753 } 754 } 755 756 struct fuse_fill_data { 757 struct fuse_req *req; 758 struct file *file; 759 struct inode *inode; 760 unsigned nr_pages; 761 }; 762 763 static int fuse_readpages_fill(void *_data, struct page *page) 764 { 765 struct fuse_fill_data *data = _data; 766 struct fuse_req *req = data->req; 767 struct inode *inode = data->inode; 768 struct fuse_conn *fc = get_fuse_conn(inode); 769 770 fuse_wait_on_page_writeback(inode, page->index); 771 772 if (req->num_pages && 773 (req->num_pages == FUSE_MAX_PAGES_PER_REQ || 774 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || 775 req->pages[req->num_pages - 1]->index + 1 != page->index)) { 776 int nr_alloc = min_t(unsigned, data->nr_pages, 777 FUSE_MAX_PAGES_PER_REQ); 778 fuse_send_readpages(req, data->file); 779 if (fc->async_read) 780 req = fuse_get_req_for_background(fc, nr_alloc); 781 else 782 req = fuse_get_req(fc, nr_alloc); 783 784 data->req = req; 785 if (IS_ERR(req)) { 786 unlock_page(page); 787 return PTR_ERR(req); 788 } 789 } 790 791 if (WARN_ON(req->num_pages >= req->max_pages)) { 792 fuse_put_request(fc, req); 793 return -EIO; 794 } 795 796 page_cache_get(page); 797 req->pages[req->num_pages] = page; 798 req->page_descs[req->num_pages].length = PAGE_SIZE; 799 req->num_pages++; 800 data->nr_pages--; 801 return 0; 802 } 803 804 static int fuse_readpages(struct file *file, struct address_space *mapping, 805 struct list_head *pages, unsigned nr_pages) 806 { 807 struct inode *inode = mapping->host; 808 struct fuse_conn *fc = get_fuse_conn(inode); 809 struct fuse_fill_data data; 810 int err; 811 int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ); 812 813 err = -EIO; 814 if (is_bad_inode(inode)) 815 goto out; 816 817 data.file = file; 818 data.inode = inode; 819 if (fc->async_read) 820 data.req = fuse_get_req_for_background(fc, nr_alloc); 821 else 822 data.req = fuse_get_req(fc, nr_alloc); 823 data.nr_pages = nr_pages; 824 err = PTR_ERR(data.req); 825 if (IS_ERR(data.req)) 826 goto out; 827 828 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); 829 if (!err) { 830 if (data.req->num_pages) 831 fuse_send_readpages(data.req, file); 832 else 833 fuse_put_request(fc, data.req); 834 } 835 out: 836 return err; 837 } 838 839 static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov, 840 unsigned long nr_segs, loff_t pos) 841 { 842 struct inode *inode = iocb->ki_filp->f_mapping->host; 843 struct fuse_conn *fc = get_fuse_conn(inode); 844 845 /* 846 * In auto invalidate mode, always update attributes on read. 847 * Otherwise, only update if we attempt to read past EOF (to ensure 848 * i_size is up to date). 849 */ 850 if (fc->auto_inval_data || 851 (pos + iov_length(iov, nr_segs) > i_size_read(inode))) { 852 int err; 853 err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL); 854 if (err) 855 return err; 856 } 857 858 return generic_file_aio_read(iocb, iov, nr_segs, pos); 859 } 860 861 static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff, 862 loff_t pos, size_t count) 863 { 864 struct fuse_write_in *inarg = &req->misc.write.in; 865 struct fuse_write_out *outarg = &req->misc.write.out; 866 867 inarg->fh = ff->fh; 868 inarg->offset = pos; 869 inarg->size = count; 870 req->in.h.opcode = FUSE_WRITE; 871 req->in.h.nodeid = ff->nodeid; 872 req->in.numargs = 2; 873 if (ff->fc->minor < 9) 874 req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; 875 else 876 req->in.args[0].size = sizeof(struct fuse_write_in); 877 req->in.args[0].value = inarg; 878 req->in.args[1].size = count; 879 req->out.numargs = 1; 880 req->out.args[0].size = sizeof(struct fuse_write_out); 881 req->out.args[0].value = outarg; 882 } 883 884 static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io, 885 loff_t pos, size_t count, fl_owner_t owner) 886 { 887 struct file *file = io->file; 888 struct fuse_file *ff = file->private_data; 889 struct fuse_conn *fc = ff->fc; 890 struct fuse_write_in *inarg = &req->misc.write.in; 891 892 fuse_write_fill(req, ff, pos, count); 893 inarg->flags = file->f_flags; 894 if (owner != NULL) { 895 inarg->write_flags |= FUSE_WRITE_LOCKOWNER; 896 inarg->lock_owner = fuse_lock_owner_id(fc, owner); 897 } 898 899 if (io->async) 900 return fuse_async_req_send(fc, req, count, io); 901 902 fuse_request_send(fc, req); 903 return req->misc.write.out.size; 904 } 905 906 void fuse_write_update_size(struct inode *inode, loff_t pos) 907 { 908 struct fuse_conn *fc = get_fuse_conn(inode); 909 struct fuse_inode *fi = get_fuse_inode(inode); 910 911 spin_lock(&fc->lock); 912 fi->attr_version = ++fc->attr_version; 913 if (pos > inode->i_size) 914 i_size_write(inode, pos); 915 spin_unlock(&fc->lock); 916 } 917 918 static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, 919 struct inode *inode, loff_t pos, 920 size_t count) 921 { 922 size_t res; 923 unsigned offset; 924 unsigned i; 925 struct fuse_io_priv io = { .async = 0, .file = file }; 926 927 for (i = 0; i < req->num_pages; i++) 928 fuse_wait_on_page_writeback(inode, req->pages[i]->index); 929 930 res = fuse_send_write(req, &io, pos, count, NULL); 931 932 offset = req->page_descs[0].offset; 933 count = res; 934 for (i = 0; i < req->num_pages; i++) { 935 struct page *page = req->pages[i]; 936 937 if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) 938 SetPageUptodate(page); 939 940 if (count > PAGE_CACHE_SIZE - offset) 941 count -= PAGE_CACHE_SIZE - offset; 942 else 943 count = 0; 944 offset = 0; 945 946 unlock_page(page); 947 page_cache_release(page); 948 } 949 950 return res; 951 } 952 953 static ssize_t fuse_fill_write_pages(struct fuse_req *req, 954 struct address_space *mapping, 955 struct iov_iter *ii, loff_t pos) 956 { 957 struct fuse_conn *fc = get_fuse_conn(mapping->host); 958 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 959 size_t count = 0; 960 int err; 961 962 req->in.argpages = 1; 963 req->page_descs[0].offset = offset; 964 965 do { 966 size_t tmp; 967 struct page *page; 968 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 969 size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, 970 iov_iter_count(ii)); 971 972 bytes = min_t(size_t, bytes, fc->max_write - count); 973 974 again: 975 err = -EFAULT; 976 if (iov_iter_fault_in_readable(ii, bytes)) 977 break; 978 979 err = -ENOMEM; 980 page = grab_cache_page_write_begin(mapping, index, 0); 981 if (!page) 982 break; 983 984 if (mapping_writably_mapped(mapping)) 985 flush_dcache_page(page); 986 987 pagefault_disable(); 988 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); 989 pagefault_enable(); 990 flush_dcache_page(page); 991 992 mark_page_accessed(page); 993 994 if (!tmp) { 995 unlock_page(page); 996 page_cache_release(page); 997 bytes = min(bytes, iov_iter_single_seg_count(ii)); 998 goto again; 999 } 1000 1001 err = 0; 1002 req->pages[req->num_pages] = page; 1003 req->page_descs[req->num_pages].length = tmp; 1004 req->num_pages++; 1005 1006 iov_iter_advance(ii, tmp); 1007 count += tmp; 1008 pos += tmp; 1009 offset += tmp; 1010 if (offset == PAGE_CACHE_SIZE) 1011 offset = 0; 1012 1013 if (!fc->big_writes) 1014 break; 1015 } while (iov_iter_count(ii) && count < fc->max_write && 1016 req->num_pages < req->max_pages && offset == 0); 1017 1018 return count > 0 ? count : err; 1019 } 1020 1021 static inline unsigned fuse_wr_pages(loff_t pos, size_t len) 1022 { 1023 return min_t(unsigned, 1024 ((pos + len - 1) >> PAGE_CACHE_SHIFT) - 1025 (pos >> PAGE_CACHE_SHIFT) + 1, 1026 FUSE_MAX_PAGES_PER_REQ); 1027 } 1028 1029 static ssize_t fuse_perform_write(struct file *file, 1030 struct address_space *mapping, 1031 struct iov_iter *ii, loff_t pos) 1032 { 1033 struct inode *inode = mapping->host; 1034 struct fuse_conn *fc = get_fuse_conn(inode); 1035 int err = 0; 1036 ssize_t res = 0; 1037 1038 if (is_bad_inode(inode)) 1039 return -EIO; 1040 1041 do { 1042 struct fuse_req *req; 1043 ssize_t count; 1044 unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii)); 1045 1046 req = fuse_get_req(fc, nr_pages); 1047 if (IS_ERR(req)) { 1048 err = PTR_ERR(req); 1049 break; 1050 } 1051 1052 count = fuse_fill_write_pages(req, mapping, ii, pos); 1053 if (count <= 0) { 1054 err = count; 1055 } else { 1056 size_t num_written; 1057 1058 num_written = fuse_send_write_pages(req, file, inode, 1059 pos, count); 1060 err = req->out.h.error; 1061 if (!err) { 1062 res += num_written; 1063 pos += num_written; 1064 1065 /* break out of the loop on short write */ 1066 if (num_written != count) 1067 err = -EIO; 1068 } 1069 } 1070 fuse_put_request(fc, req); 1071 } while (!err && iov_iter_count(ii)); 1072 1073 if (res > 0) 1074 fuse_write_update_size(inode, pos); 1075 1076 fuse_invalidate_attr(inode); 1077 1078 return res > 0 ? res : err; 1079 } 1080 1081 static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 1082 unsigned long nr_segs, loff_t pos) 1083 { 1084 struct file *file = iocb->ki_filp; 1085 struct address_space *mapping = file->f_mapping; 1086 size_t count = 0; 1087 size_t ocount = 0; 1088 ssize_t written = 0; 1089 ssize_t written_buffered = 0; 1090 struct inode *inode = mapping->host; 1091 ssize_t err; 1092 struct iov_iter i; 1093 loff_t endbyte = 0; 1094 1095 WARN_ON(iocb->ki_pos != pos); 1096 1097 ocount = 0; 1098 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); 1099 if (err) 1100 return err; 1101 1102 count = ocount; 1103 mutex_lock(&inode->i_mutex); 1104 1105 /* We can write back this queue in page reclaim */ 1106 current->backing_dev_info = mapping->backing_dev_info; 1107 1108 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 1109 if (err) 1110 goto out; 1111 1112 if (count == 0) 1113 goto out; 1114 1115 err = file_remove_suid(file); 1116 if (err) 1117 goto out; 1118 1119 err = file_update_time(file); 1120 if (err) 1121 goto out; 1122 1123 if (file->f_flags & O_DIRECT) { 1124 written = generic_file_direct_write(iocb, iov, &nr_segs, 1125 pos, &iocb->ki_pos, 1126 count, ocount); 1127 if (written < 0 || written == count) 1128 goto out; 1129 1130 pos += written; 1131 count -= written; 1132 1133 iov_iter_init(&i, iov, nr_segs, count, written); 1134 written_buffered = fuse_perform_write(file, mapping, &i, pos); 1135 if (written_buffered < 0) { 1136 err = written_buffered; 1137 goto out; 1138 } 1139 endbyte = pos + written_buffered - 1; 1140 1141 err = filemap_write_and_wait_range(file->f_mapping, pos, 1142 endbyte); 1143 if (err) 1144 goto out; 1145 1146 invalidate_mapping_pages(file->f_mapping, 1147 pos >> PAGE_CACHE_SHIFT, 1148 endbyte >> PAGE_CACHE_SHIFT); 1149 1150 written += written_buffered; 1151 iocb->ki_pos = pos + written_buffered; 1152 } else { 1153 iov_iter_init(&i, iov, nr_segs, count, 0); 1154 written = fuse_perform_write(file, mapping, &i, pos); 1155 if (written >= 0) 1156 iocb->ki_pos = pos + written; 1157 } 1158 out: 1159 current->backing_dev_info = NULL; 1160 mutex_unlock(&inode->i_mutex); 1161 1162 return written ? written : err; 1163 } 1164 1165 static inline void fuse_page_descs_length_init(struct fuse_req *req, 1166 unsigned index, unsigned nr_pages) 1167 { 1168 int i; 1169 1170 for (i = index; i < index + nr_pages; i++) 1171 req->page_descs[i].length = PAGE_SIZE - 1172 req->page_descs[i].offset; 1173 } 1174 1175 static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii) 1176 { 1177 return (unsigned long)ii->iov->iov_base + ii->iov_offset; 1178 } 1179 1180 static inline size_t fuse_get_frag_size(const struct iov_iter *ii, 1181 size_t max_size) 1182 { 1183 return min(iov_iter_single_seg_count(ii), max_size); 1184 } 1185 1186 static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, 1187 size_t *nbytesp, int write) 1188 { 1189 size_t nbytes = 0; /* # bytes already packed in req */ 1190 1191 /* Special case for kernel I/O: can copy directly into the buffer */ 1192 if (segment_eq(get_fs(), KERNEL_DS)) { 1193 unsigned long user_addr = fuse_get_user_addr(ii); 1194 size_t frag_size = fuse_get_frag_size(ii, *nbytesp); 1195 1196 if (write) 1197 req->in.args[1].value = (void *) user_addr; 1198 else 1199 req->out.args[0].value = (void *) user_addr; 1200 1201 iov_iter_advance(ii, frag_size); 1202 *nbytesp = frag_size; 1203 return 0; 1204 } 1205 1206 while (nbytes < *nbytesp && req->num_pages < req->max_pages) { 1207 unsigned npages; 1208 unsigned long user_addr = fuse_get_user_addr(ii); 1209 unsigned offset = user_addr & ~PAGE_MASK; 1210 size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes); 1211 int ret; 1212 1213 unsigned n = req->max_pages - req->num_pages; 1214 frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT); 1215 1216 npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 1217 npages = clamp(npages, 1U, n); 1218 1219 ret = get_user_pages_fast(user_addr, npages, !write, 1220 &req->pages[req->num_pages]); 1221 if (ret < 0) 1222 return ret; 1223 1224 npages = ret; 1225 frag_size = min_t(size_t, frag_size, 1226 (npages << PAGE_SHIFT) - offset); 1227 iov_iter_advance(ii, frag_size); 1228 1229 req->page_descs[req->num_pages].offset = offset; 1230 fuse_page_descs_length_init(req, req->num_pages, npages); 1231 1232 req->num_pages += npages; 1233 req->page_descs[req->num_pages - 1].length -= 1234 (npages << PAGE_SHIFT) - offset - frag_size; 1235 1236 nbytes += frag_size; 1237 } 1238 1239 if (write) 1240 req->in.argpages = 1; 1241 else 1242 req->out.argpages = 1; 1243 1244 *nbytesp = nbytes; 1245 1246 return 0; 1247 } 1248 1249 static inline int fuse_iter_npages(const struct iov_iter *ii_p) 1250 { 1251 struct iov_iter ii = *ii_p; 1252 int npages = 0; 1253 1254 while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) { 1255 unsigned long user_addr = fuse_get_user_addr(&ii); 1256 unsigned offset = user_addr & ~PAGE_MASK; 1257 size_t frag_size = iov_iter_single_seg_count(&ii); 1258 1259 npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 1260 iov_iter_advance(&ii, frag_size); 1261 } 1262 1263 return min(npages, FUSE_MAX_PAGES_PER_REQ); 1264 } 1265 1266 ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov, 1267 unsigned long nr_segs, size_t count, loff_t *ppos, 1268 int write) 1269 { 1270 struct file *file = io->file; 1271 struct fuse_file *ff = file->private_data; 1272 struct fuse_conn *fc = ff->fc; 1273 size_t nmax = write ? fc->max_write : fc->max_read; 1274 loff_t pos = *ppos; 1275 ssize_t res = 0; 1276 struct fuse_req *req; 1277 struct iov_iter ii; 1278 1279 iov_iter_init(&ii, iov, nr_segs, count, 0); 1280 1281 if (io->async) 1282 req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii)); 1283 else 1284 req = fuse_get_req(fc, fuse_iter_npages(&ii)); 1285 if (IS_ERR(req)) 1286 return PTR_ERR(req); 1287 1288 while (count) { 1289 size_t nres; 1290 fl_owner_t owner = current->files; 1291 size_t nbytes = min(count, nmax); 1292 int err = fuse_get_user_pages(req, &ii, &nbytes, write); 1293 if (err) { 1294 res = err; 1295 break; 1296 } 1297 1298 if (write) 1299 nres = fuse_send_write(req, io, pos, nbytes, owner); 1300 else 1301 nres = fuse_send_read(req, io, pos, nbytes, owner); 1302 1303 if (!io->async) 1304 fuse_release_user_pages(req, !write); 1305 if (req->out.h.error) { 1306 if (!res) 1307 res = req->out.h.error; 1308 break; 1309 } else if (nres > nbytes) { 1310 res = -EIO; 1311 break; 1312 } 1313 count -= nres; 1314 res += nres; 1315 pos += nres; 1316 if (nres != nbytes) 1317 break; 1318 if (count) { 1319 fuse_put_request(fc, req); 1320 if (io->async) 1321 req = fuse_get_req_for_background(fc, 1322 fuse_iter_npages(&ii)); 1323 else 1324 req = fuse_get_req(fc, fuse_iter_npages(&ii)); 1325 if (IS_ERR(req)) 1326 break; 1327 } 1328 } 1329 if (!IS_ERR(req)) 1330 fuse_put_request(fc, req); 1331 if (res > 0) 1332 *ppos = pos; 1333 1334 return res; 1335 } 1336 EXPORT_SYMBOL_GPL(fuse_direct_io); 1337 1338 static ssize_t __fuse_direct_read(struct fuse_io_priv *io, 1339 const struct iovec *iov, 1340 unsigned long nr_segs, loff_t *ppos, 1341 size_t count) 1342 { 1343 ssize_t res; 1344 struct file *file = io->file; 1345 struct inode *inode = file_inode(file); 1346 1347 if (is_bad_inode(inode)) 1348 return -EIO; 1349 1350 res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0); 1351 1352 fuse_invalidate_attr(inode); 1353 1354 return res; 1355 } 1356 1357 static ssize_t fuse_direct_read(struct file *file, char __user *buf, 1358 size_t count, loff_t *ppos) 1359 { 1360 struct fuse_io_priv io = { .async = 0, .file = file }; 1361 struct iovec iov = { .iov_base = buf, .iov_len = count }; 1362 return __fuse_direct_read(&io, &iov, 1, ppos, count); 1363 } 1364 1365 static ssize_t __fuse_direct_write(struct fuse_io_priv *io, 1366 const struct iovec *iov, 1367 unsigned long nr_segs, loff_t *ppos) 1368 { 1369 struct file *file = io->file; 1370 struct inode *inode = file_inode(file); 1371 size_t count = iov_length(iov, nr_segs); 1372 ssize_t res; 1373 1374 res = generic_write_checks(file, ppos, &count, 0); 1375 if (!res) 1376 res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1); 1377 1378 fuse_invalidate_attr(inode); 1379 1380 return res; 1381 } 1382 1383 static ssize_t fuse_direct_write(struct file *file, const char __user *buf, 1384 size_t count, loff_t *ppos) 1385 { 1386 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; 1387 struct inode *inode = file_inode(file); 1388 ssize_t res; 1389 struct fuse_io_priv io = { .async = 0, .file = file }; 1390 1391 if (is_bad_inode(inode)) 1392 return -EIO; 1393 1394 /* Don't allow parallel writes to the same file */ 1395 mutex_lock(&inode->i_mutex); 1396 res = __fuse_direct_write(&io, &iov, 1, ppos); 1397 if (res > 0) 1398 fuse_write_update_size(inode, *ppos); 1399 mutex_unlock(&inode->i_mutex); 1400 1401 return res; 1402 } 1403 1404 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) 1405 { 1406 __free_page(req->pages[0]); 1407 fuse_file_put(req->ff, false); 1408 } 1409 1410 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) 1411 { 1412 struct inode *inode = req->inode; 1413 struct fuse_inode *fi = get_fuse_inode(inode); 1414 struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; 1415 1416 list_del(&req->writepages_entry); 1417 dec_bdi_stat(bdi, BDI_WRITEBACK); 1418 dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP); 1419 bdi_writeout_inc(bdi); 1420 wake_up(&fi->page_waitq); 1421 } 1422 1423 /* Called under fc->lock, may release and reacquire it */ 1424 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req) 1425 __releases(fc->lock) 1426 __acquires(fc->lock) 1427 { 1428 struct fuse_inode *fi = get_fuse_inode(req->inode); 1429 loff_t size = i_size_read(req->inode); 1430 struct fuse_write_in *inarg = &req->misc.write.in; 1431 1432 if (!fc->connected) 1433 goto out_free; 1434 1435 if (inarg->offset + PAGE_CACHE_SIZE <= size) { 1436 inarg->size = PAGE_CACHE_SIZE; 1437 } else if (inarg->offset < size) { 1438 inarg->size = size & (PAGE_CACHE_SIZE - 1); 1439 } else { 1440 /* Got truncated off completely */ 1441 goto out_free; 1442 } 1443 1444 req->in.args[1].size = inarg->size; 1445 fi->writectr++; 1446 fuse_request_send_background_locked(fc, req); 1447 return; 1448 1449 out_free: 1450 fuse_writepage_finish(fc, req); 1451 spin_unlock(&fc->lock); 1452 fuse_writepage_free(fc, req); 1453 fuse_put_request(fc, req); 1454 spin_lock(&fc->lock); 1455 } 1456 1457 /* 1458 * If fi->writectr is positive (no truncate or fsync going on) send 1459 * all queued writepage requests. 1460 * 1461 * Called with fc->lock 1462 */ 1463 void fuse_flush_writepages(struct inode *inode) 1464 __releases(fc->lock) 1465 __acquires(fc->lock) 1466 { 1467 struct fuse_conn *fc = get_fuse_conn(inode); 1468 struct fuse_inode *fi = get_fuse_inode(inode); 1469 struct fuse_req *req; 1470 1471 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) { 1472 req = list_entry(fi->queued_writes.next, struct fuse_req, list); 1473 list_del_init(&req->list); 1474 fuse_send_writepage(fc, req); 1475 } 1476 } 1477 1478 static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req) 1479 { 1480 struct inode *inode = req->inode; 1481 struct fuse_inode *fi = get_fuse_inode(inode); 1482 1483 mapping_set_error(inode->i_mapping, req->out.h.error); 1484 spin_lock(&fc->lock); 1485 fi->writectr--; 1486 fuse_writepage_finish(fc, req); 1487 spin_unlock(&fc->lock); 1488 fuse_writepage_free(fc, req); 1489 } 1490 1491 static int fuse_writepage_locked(struct page *page) 1492 { 1493 struct address_space *mapping = page->mapping; 1494 struct inode *inode = mapping->host; 1495 struct fuse_conn *fc = get_fuse_conn(inode); 1496 struct fuse_inode *fi = get_fuse_inode(inode); 1497 struct fuse_req *req; 1498 struct fuse_file *ff; 1499 struct page *tmp_page; 1500 1501 set_page_writeback(page); 1502 1503 req = fuse_request_alloc_nofs(1); 1504 if (!req) 1505 goto err; 1506 1507 req->background = 1; /* writeback always goes to bg_queue */ 1508 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 1509 if (!tmp_page) 1510 goto err_free; 1511 1512 spin_lock(&fc->lock); 1513 BUG_ON(list_empty(&fi->write_files)); 1514 ff = list_entry(fi->write_files.next, struct fuse_file, write_entry); 1515 req->ff = fuse_file_get(ff); 1516 spin_unlock(&fc->lock); 1517 1518 fuse_write_fill(req, ff, page_offset(page), 0); 1519 1520 copy_highpage(tmp_page, page); 1521 req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; 1522 req->in.argpages = 1; 1523 req->num_pages = 1; 1524 req->pages[0] = tmp_page; 1525 req->page_descs[0].offset = 0; 1526 req->page_descs[0].length = PAGE_SIZE; 1527 req->end = fuse_writepage_end; 1528 req->inode = inode; 1529 1530 inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK); 1531 inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); 1532 end_page_writeback(page); 1533 1534 spin_lock(&fc->lock); 1535 list_add(&req->writepages_entry, &fi->writepages); 1536 list_add_tail(&req->list, &fi->queued_writes); 1537 fuse_flush_writepages(inode); 1538 spin_unlock(&fc->lock); 1539 1540 return 0; 1541 1542 err_free: 1543 fuse_request_free(req); 1544 err: 1545 end_page_writeback(page); 1546 return -ENOMEM; 1547 } 1548 1549 static int fuse_writepage(struct page *page, struct writeback_control *wbc) 1550 { 1551 int err; 1552 1553 err = fuse_writepage_locked(page); 1554 unlock_page(page); 1555 1556 return err; 1557 } 1558 1559 static int fuse_launder_page(struct page *page) 1560 { 1561 int err = 0; 1562 if (clear_page_dirty_for_io(page)) { 1563 struct inode *inode = page->mapping->host; 1564 err = fuse_writepage_locked(page); 1565 if (!err) 1566 fuse_wait_on_page_writeback(inode, page->index); 1567 } 1568 return err; 1569 } 1570 1571 /* 1572 * Write back dirty pages now, because there may not be any suitable 1573 * open files later 1574 */ 1575 static void fuse_vma_close(struct vm_area_struct *vma) 1576 { 1577 filemap_write_and_wait(vma->vm_file->f_mapping); 1578 } 1579 1580 /* 1581 * Wait for writeback against this page to complete before allowing it 1582 * to be marked dirty again, and hence written back again, possibly 1583 * before the previous writepage completed. 1584 * 1585 * Block here, instead of in ->writepage(), so that the userspace fs 1586 * can only block processes actually operating on the filesystem. 1587 * 1588 * Otherwise unprivileged userspace fs would be able to block 1589 * unrelated: 1590 * 1591 * - page migration 1592 * - sync(2) 1593 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER 1594 */ 1595 static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 1596 { 1597 struct page *page = vmf->page; 1598 /* 1599 * Don't use page->mapping as it may become NULL from a 1600 * concurrent truncate. 1601 */ 1602 struct inode *inode = vma->vm_file->f_mapping->host; 1603 1604 fuse_wait_on_page_writeback(inode, page->index); 1605 return 0; 1606 } 1607 1608 static const struct vm_operations_struct fuse_file_vm_ops = { 1609 .close = fuse_vma_close, 1610 .fault = filemap_fault, 1611 .page_mkwrite = fuse_page_mkwrite, 1612 .remap_pages = generic_file_remap_pages, 1613 }; 1614 1615 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) 1616 { 1617 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) { 1618 struct inode *inode = file_inode(file); 1619 struct fuse_conn *fc = get_fuse_conn(inode); 1620 struct fuse_inode *fi = get_fuse_inode(inode); 1621 struct fuse_file *ff = file->private_data; 1622 /* 1623 * file may be written through mmap, so chain it onto the 1624 * inodes's write_file list 1625 */ 1626 spin_lock(&fc->lock); 1627 if (list_empty(&ff->write_entry)) 1628 list_add(&ff->write_entry, &fi->write_files); 1629 spin_unlock(&fc->lock); 1630 } 1631 file_accessed(file); 1632 vma->vm_ops = &fuse_file_vm_ops; 1633 return 0; 1634 } 1635 1636 static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma) 1637 { 1638 /* Can't provide the coherency needed for MAP_SHARED */ 1639 if (vma->vm_flags & VM_MAYSHARE) 1640 return -ENODEV; 1641 1642 invalidate_inode_pages2(file->f_mapping); 1643 1644 return generic_file_mmap(file, vma); 1645 } 1646 1647 static int convert_fuse_file_lock(const struct fuse_file_lock *ffl, 1648 struct file_lock *fl) 1649 { 1650 switch (ffl->type) { 1651 case F_UNLCK: 1652 break; 1653 1654 case F_RDLCK: 1655 case F_WRLCK: 1656 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX || 1657 ffl->end < ffl->start) 1658 return -EIO; 1659 1660 fl->fl_start = ffl->start; 1661 fl->fl_end = ffl->end; 1662 fl->fl_pid = ffl->pid; 1663 break; 1664 1665 default: 1666 return -EIO; 1667 } 1668 fl->fl_type = ffl->type; 1669 return 0; 1670 } 1671 1672 static void fuse_lk_fill(struct fuse_req *req, struct file *file, 1673 const struct file_lock *fl, int opcode, pid_t pid, 1674 int flock) 1675 { 1676 struct inode *inode = file_inode(file); 1677 struct fuse_conn *fc = get_fuse_conn(inode); 1678 struct fuse_file *ff = file->private_data; 1679 struct fuse_lk_in *arg = &req->misc.lk_in; 1680 1681 arg->fh = ff->fh; 1682 arg->owner = fuse_lock_owner_id(fc, fl->fl_owner); 1683 arg->lk.start = fl->fl_start; 1684 arg->lk.end = fl->fl_end; 1685 arg->lk.type = fl->fl_type; 1686 arg->lk.pid = pid; 1687 if (flock) 1688 arg->lk_flags |= FUSE_LK_FLOCK; 1689 req->in.h.opcode = opcode; 1690 req->in.h.nodeid = get_node_id(inode); 1691 req->in.numargs = 1; 1692 req->in.args[0].size = sizeof(*arg); 1693 req->in.args[0].value = arg; 1694 } 1695 1696 static int fuse_getlk(struct file *file, struct file_lock *fl) 1697 { 1698 struct inode *inode = file_inode(file); 1699 struct fuse_conn *fc = get_fuse_conn(inode); 1700 struct fuse_req *req; 1701 struct fuse_lk_out outarg; 1702 int err; 1703 1704 req = fuse_get_req_nopages(fc); 1705 if (IS_ERR(req)) 1706 return PTR_ERR(req); 1707 1708 fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0); 1709 req->out.numargs = 1; 1710 req->out.args[0].size = sizeof(outarg); 1711 req->out.args[0].value = &outarg; 1712 fuse_request_send(fc, req); 1713 err = req->out.h.error; 1714 fuse_put_request(fc, req); 1715 if (!err) 1716 err = convert_fuse_file_lock(&outarg.lk, fl); 1717 1718 return err; 1719 } 1720 1721 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) 1722 { 1723 struct inode *inode = file_inode(file); 1724 struct fuse_conn *fc = get_fuse_conn(inode); 1725 struct fuse_req *req; 1726 int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; 1727 pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0; 1728 int err; 1729 1730 if (fl->fl_lmops && fl->fl_lmops->lm_grant) { 1731 /* NLM needs asynchronous locks, which we don't support yet */ 1732 return -ENOLCK; 1733 } 1734 1735 /* Unlock on close is handled by the flush method */ 1736 if (fl->fl_flags & FL_CLOSE) 1737 return 0; 1738 1739 req = fuse_get_req_nopages(fc); 1740 if (IS_ERR(req)) 1741 return PTR_ERR(req); 1742 1743 fuse_lk_fill(req, file, fl, opcode, pid, flock); 1744 fuse_request_send(fc, req); 1745 err = req->out.h.error; 1746 /* locking is restartable */ 1747 if (err == -EINTR) 1748 err = -ERESTARTSYS; 1749 fuse_put_request(fc, req); 1750 return err; 1751 } 1752 1753 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) 1754 { 1755 struct inode *inode = file_inode(file); 1756 struct fuse_conn *fc = get_fuse_conn(inode); 1757 int err; 1758 1759 if (cmd == F_CANCELLK) { 1760 err = 0; 1761 } else if (cmd == F_GETLK) { 1762 if (fc->no_lock) { 1763 posix_test_lock(file, fl); 1764 err = 0; 1765 } else 1766 err = fuse_getlk(file, fl); 1767 } else { 1768 if (fc->no_lock) 1769 err = posix_lock_file(file, fl, NULL); 1770 else 1771 err = fuse_setlk(file, fl, 0); 1772 } 1773 return err; 1774 } 1775 1776 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) 1777 { 1778 struct inode *inode = file_inode(file); 1779 struct fuse_conn *fc = get_fuse_conn(inode); 1780 int err; 1781 1782 if (fc->no_flock) { 1783 err = flock_lock_file_wait(file, fl); 1784 } else { 1785 struct fuse_file *ff = file->private_data; 1786 1787 /* emulate flock with POSIX locks */ 1788 fl->fl_owner = (fl_owner_t) file; 1789 ff->flock = true; 1790 err = fuse_setlk(file, fl, 1); 1791 } 1792 1793 return err; 1794 } 1795 1796 static sector_t fuse_bmap(struct address_space *mapping, sector_t block) 1797 { 1798 struct inode *inode = mapping->host; 1799 struct fuse_conn *fc = get_fuse_conn(inode); 1800 struct fuse_req *req; 1801 struct fuse_bmap_in inarg; 1802 struct fuse_bmap_out outarg; 1803 int err; 1804 1805 if (!inode->i_sb->s_bdev || fc->no_bmap) 1806 return 0; 1807 1808 req = fuse_get_req_nopages(fc); 1809 if (IS_ERR(req)) 1810 return 0; 1811 1812 memset(&inarg, 0, sizeof(inarg)); 1813 inarg.block = block; 1814 inarg.blocksize = inode->i_sb->s_blocksize; 1815 req->in.h.opcode = FUSE_BMAP; 1816 req->in.h.nodeid = get_node_id(inode); 1817 req->in.numargs = 1; 1818 req->in.args[0].size = sizeof(inarg); 1819 req->in.args[0].value = &inarg; 1820 req->out.numargs = 1; 1821 req->out.args[0].size = sizeof(outarg); 1822 req->out.args[0].value = &outarg; 1823 fuse_request_send(fc, req); 1824 err = req->out.h.error; 1825 fuse_put_request(fc, req); 1826 if (err == -ENOSYS) 1827 fc->no_bmap = 1; 1828 1829 return err ? 0 : outarg.block; 1830 } 1831 1832 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence) 1833 { 1834 loff_t retval; 1835 struct inode *inode = file_inode(file); 1836 1837 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ 1838 if (whence == SEEK_CUR || whence == SEEK_SET) 1839 return generic_file_llseek(file, offset, whence); 1840 1841 mutex_lock(&inode->i_mutex); 1842 retval = fuse_update_attributes(inode, NULL, file, NULL); 1843 if (!retval) 1844 retval = generic_file_llseek(file, offset, whence); 1845 mutex_unlock(&inode->i_mutex); 1846 1847 return retval; 1848 } 1849 1850 static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, 1851 unsigned int nr_segs, size_t bytes, bool to_user) 1852 { 1853 struct iov_iter ii; 1854 int page_idx = 0; 1855 1856 if (!bytes) 1857 return 0; 1858 1859 iov_iter_init(&ii, iov, nr_segs, bytes, 0); 1860 1861 while (iov_iter_count(&ii)) { 1862 struct page *page = pages[page_idx++]; 1863 size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); 1864 void *kaddr; 1865 1866 kaddr = kmap(page); 1867 1868 while (todo) { 1869 char __user *uaddr = ii.iov->iov_base + ii.iov_offset; 1870 size_t iov_len = ii.iov->iov_len - ii.iov_offset; 1871 size_t copy = min(todo, iov_len); 1872 size_t left; 1873 1874 if (!to_user) 1875 left = copy_from_user(kaddr, uaddr, copy); 1876 else 1877 left = copy_to_user(uaddr, kaddr, copy); 1878 1879 if (unlikely(left)) 1880 return -EFAULT; 1881 1882 iov_iter_advance(&ii, copy); 1883 todo -= copy; 1884 kaddr += copy; 1885 } 1886 1887 kunmap(page); 1888 } 1889 1890 return 0; 1891 } 1892 1893 /* 1894 * CUSE servers compiled on 32bit broke on 64bit kernels because the 1895 * ABI was defined to be 'struct iovec' which is different on 32bit 1896 * and 64bit. Fortunately we can determine which structure the server 1897 * used from the size of the reply. 1898 */ 1899 static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src, 1900 size_t transferred, unsigned count, 1901 bool is_compat) 1902 { 1903 #ifdef CONFIG_COMPAT 1904 if (count * sizeof(struct compat_iovec) == transferred) { 1905 struct compat_iovec *ciov = src; 1906 unsigned i; 1907 1908 /* 1909 * With this interface a 32bit server cannot support 1910 * non-compat (i.e. ones coming from 64bit apps) ioctl 1911 * requests 1912 */ 1913 if (!is_compat) 1914 return -EINVAL; 1915 1916 for (i = 0; i < count; i++) { 1917 dst[i].iov_base = compat_ptr(ciov[i].iov_base); 1918 dst[i].iov_len = ciov[i].iov_len; 1919 } 1920 return 0; 1921 } 1922 #endif 1923 1924 if (count * sizeof(struct iovec) != transferred) 1925 return -EIO; 1926 1927 memcpy(dst, src, transferred); 1928 return 0; 1929 } 1930 1931 /* Make sure iov_length() won't overflow */ 1932 static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) 1933 { 1934 size_t n; 1935 u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; 1936 1937 for (n = 0; n < count; n++, iov++) { 1938 if (iov->iov_len > (size_t) max) 1939 return -ENOMEM; 1940 max -= iov->iov_len; 1941 } 1942 return 0; 1943 } 1944 1945 static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst, 1946 void *src, size_t transferred, unsigned count, 1947 bool is_compat) 1948 { 1949 unsigned i; 1950 struct fuse_ioctl_iovec *fiov = src; 1951 1952 if (fc->minor < 16) { 1953 return fuse_copy_ioctl_iovec_old(dst, src, transferred, 1954 count, is_compat); 1955 } 1956 1957 if (count * sizeof(struct fuse_ioctl_iovec) != transferred) 1958 return -EIO; 1959 1960 for (i = 0; i < count; i++) { 1961 /* Did the server supply an inappropriate value? */ 1962 if (fiov[i].base != (unsigned long) fiov[i].base || 1963 fiov[i].len != (unsigned long) fiov[i].len) 1964 return -EIO; 1965 1966 dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base; 1967 dst[i].iov_len = (size_t) fiov[i].len; 1968 1969 #ifdef CONFIG_COMPAT 1970 if (is_compat && 1971 (ptr_to_compat(dst[i].iov_base) != fiov[i].base || 1972 (compat_size_t) dst[i].iov_len != fiov[i].len)) 1973 return -EIO; 1974 #endif 1975 } 1976 1977 return 0; 1978 } 1979 1980 1981 /* 1982 * For ioctls, there is no generic way to determine how much memory 1983 * needs to be read and/or written. Furthermore, ioctls are allowed 1984 * to dereference the passed pointer, so the parameter requires deep 1985 * copying but FUSE has no idea whatsoever about what to copy in or 1986 * out. 1987 * 1988 * This is solved by allowing FUSE server to retry ioctl with 1989 * necessary in/out iovecs. Let's assume the ioctl implementation 1990 * needs to read in the following structure. 1991 * 1992 * struct a { 1993 * char *buf; 1994 * size_t buflen; 1995 * } 1996 * 1997 * On the first callout to FUSE server, inarg->in_size and 1998 * inarg->out_size will be NULL; then, the server completes the ioctl 1999 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and 2000 * the actual iov array to 2001 * 2002 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } } 2003 * 2004 * which tells FUSE to copy in the requested area and retry the ioctl. 2005 * On the second round, the server has access to the structure and 2006 * from that it can tell what to look for next, so on the invocation, 2007 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to 2008 * 2009 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) }, 2010 * { .iov_base = a.buf, .iov_len = a.buflen } } 2011 * 2012 * FUSE will copy both struct a and the pointed buffer from the 2013 * process doing the ioctl and retry ioctl with both struct a and the 2014 * buffer. 2015 * 2016 * This time, FUSE server has everything it needs and completes ioctl 2017 * without FUSE_IOCTL_RETRY which finishes the ioctl call. 2018 * 2019 * Copying data out works the same way. 2020 * 2021 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel 2022 * automatically initializes in and out iovs by decoding @cmd with 2023 * _IOC_* macros and the server is not allowed to request RETRY. This 2024 * limits ioctl data transfers to well-formed ioctls and is the forced 2025 * behavior for all FUSE servers. 2026 */ 2027 long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, 2028 unsigned int flags) 2029 { 2030 struct fuse_file *ff = file->private_data; 2031 struct fuse_conn *fc = ff->fc; 2032 struct fuse_ioctl_in inarg = { 2033 .fh = ff->fh, 2034 .cmd = cmd, 2035 .arg = arg, 2036 .flags = flags 2037 }; 2038 struct fuse_ioctl_out outarg; 2039 struct fuse_req *req = NULL; 2040 struct page **pages = NULL; 2041 struct iovec *iov_page = NULL; 2042 struct iovec *in_iov = NULL, *out_iov = NULL; 2043 unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; 2044 size_t in_size, out_size, transferred; 2045 int err; 2046 2047 #if BITS_PER_LONG == 32 2048 inarg.flags |= FUSE_IOCTL_32BIT; 2049 #else 2050 if (flags & FUSE_IOCTL_COMPAT) 2051 inarg.flags |= FUSE_IOCTL_32BIT; 2052 #endif 2053 2054 /* assume all the iovs returned by client always fits in a page */ 2055 BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); 2056 2057 err = -ENOMEM; 2058 pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL); 2059 iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); 2060 if (!pages || !iov_page) 2061 goto out; 2062 2063 /* 2064 * If restricted, initialize IO parameters as encoded in @cmd. 2065 * RETRY from server is not allowed. 2066 */ 2067 if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { 2068 struct iovec *iov = iov_page; 2069 2070 iov->iov_base = (void __user *)arg; 2071 iov->iov_len = _IOC_SIZE(cmd); 2072 2073 if (_IOC_DIR(cmd) & _IOC_WRITE) { 2074 in_iov = iov; 2075 in_iovs = 1; 2076 } 2077 2078 if (_IOC_DIR(cmd) & _IOC_READ) { 2079 out_iov = iov; 2080 out_iovs = 1; 2081 } 2082 } 2083 2084 retry: 2085 inarg.in_size = in_size = iov_length(in_iov, in_iovs); 2086 inarg.out_size = out_size = iov_length(out_iov, out_iovs); 2087 2088 /* 2089 * Out data can be used either for actual out data or iovs, 2090 * make sure there always is at least one page. 2091 */ 2092 out_size = max_t(size_t, out_size, PAGE_SIZE); 2093 max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); 2094 2095 /* make sure there are enough buffer pages and init request with them */ 2096 err = -ENOMEM; 2097 if (max_pages > FUSE_MAX_PAGES_PER_REQ) 2098 goto out; 2099 while (num_pages < max_pages) { 2100 pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); 2101 if (!pages[num_pages]) 2102 goto out; 2103 num_pages++; 2104 } 2105 2106 req = fuse_get_req(fc, num_pages); 2107 if (IS_ERR(req)) { 2108 err = PTR_ERR(req); 2109 req = NULL; 2110 goto out; 2111 } 2112 memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); 2113 req->num_pages = num_pages; 2114 fuse_page_descs_length_init(req, 0, req->num_pages); 2115 2116 /* okay, let's send it to the client */ 2117 req->in.h.opcode = FUSE_IOCTL; 2118 req->in.h.nodeid = ff->nodeid; 2119 req->in.numargs = 1; 2120 req->in.args[0].size = sizeof(inarg); 2121 req->in.args[0].value = &inarg; 2122 if (in_size) { 2123 req->in.numargs++; 2124 req->in.args[1].size = in_size; 2125 req->in.argpages = 1; 2126 2127 err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size, 2128 false); 2129 if (err) 2130 goto out; 2131 } 2132 2133 req->out.numargs = 2; 2134 req->out.args[0].size = sizeof(outarg); 2135 req->out.args[0].value = &outarg; 2136 req->out.args[1].size = out_size; 2137 req->out.argpages = 1; 2138 req->out.argvar = 1; 2139 2140 fuse_request_send(fc, req); 2141 err = req->out.h.error; 2142 transferred = req->out.args[1].size; 2143 fuse_put_request(fc, req); 2144 req = NULL; 2145 if (err) 2146 goto out; 2147 2148 /* did it ask for retry? */ 2149 if (outarg.flags & FUSE_IOCTL_RETRY) { 2150 void *vaddr; 2151 2152 /* no retry if in restricted mode */ 2153 err = -EIO; 2154 if (!(flags & FUSE_IOCTL_UNRESTRICTED)) 2155 goto out; 2156 2157 in_iovs = outarg.in_iovs; 2158 out_iovs = outarg.out_iovs; 2159 2160 /* 2161 * Make sure things are in boundary, separate checks 2162 * are to protect against overflow. 2163 */ 2164 err = -ENOMEM; 2165 if (in_iovs > FUSE_IOCTL_MAX_IOV || 2166 out_iovs > FUSE_IOCTL_MAX_IOV || 2167 in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) 2168 goto out; 2169 2170 vaddr = kmap_atomic(pages[0]); 2171 err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr, 2172 transferred, in_iovs + out_iovs, 2173 (flags & FUSE_IOCTL_COMPAT) != 0); 2174 kunmap_atomic(vaddr); 2175 if (err) 2176 goto out; 2177 2178 in_iov = iov_page; 2179 out_iov = in_iov + in_iovs; 2180 2181 err = fuse_verify_ioctl_iov(in_iov, in_iovs); 2182 if (err) 2183 goto out; 2184 2185 err = fuse_verify_ioctl_iov(out_iov, out_iovs); 2186 if (err) 2187 goto out; 2188 2189 goto retry; 2190 } 2191 2192 err = -EIO; 2193 if (transferred > inarg.out_size) 2194 goto out; 2195 2196 err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true); 2197 out: 2198 if (req) 2199 fuse_put_request(fc, req); 2200 free_page((unsigned long) iov_page); 2201 while (num_pages) 2202 __free_page(pages[--num_pages]); 2203 kfree(pages); 2204 2205 return err ? err : outarg.result; 2206 } 2207 EXPORT_SYMBOL_GPL(fuse_do_ioctl); 2208 2209 long fuse_ioctl_common(struct file *file, unsigned int cmd, 2210 unsigned long arg, unsigned int flags) 2211 { 2212 struct inode *inode = file_inode(file); 2213 struct fuse_conn *fc = get_fuse_conn(inode); 2214 2215 if (!fuse_allow_current_process(fc)) 2216 return -EACCES; 2217 2218 if (is_bad_inode(inode)) 2219 return -EIO; 2220 2221 return fuse_do_ioctl(file, cmd, arg, flags); 2222 } 2223 2224 static long fuse_file_ioctl(struct file *file, unsigned int cmd, 2225 unsigned long arg) 2226 { 2227 return fuse_ioctl_common(file, cmd, arg, 0); 2228 } 2229 2230 static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, 2231 unsigned long arg) 2232 { 2233 return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); 2234 } 2235 2236 /* 2237 * All files which have been polled are linked to RB tree 2238 * fuse_conn->polled_files which is indexed by kh. Walk the tree and 2239 * find the matching one. 2240 */ 2241 static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, 2242 struct rb_node **parent_out) 2243 { 2244 struct rb_node **link = &fc->polled_files.rb_node; 2245 struct rb_node *last = NULL; 2246 2247 while (*link) { 2248 struct fuse_file *ff; 2249 2250 last = *link; 2251 ff = rb_entry(last, struct fuse_file, polled_node); 2252 2253 if (kh < ff->kh) 2254 link = &last->rb_left; 2255 else if (kh > ff->kh) 2256 link = &last->rb_right; 2257 else 2258 return link; 2259 } 2260 2261 if (parent_out) 2262 *parent_out = last; 2263 return link; 2264 } 2265 2266 /* 2267 * The file is about to be polled. Make sure it's on the polled_files 2268 * RB tree. Note that files once added to the polled_files tree are 2269 * not removed before the file is released. This is because a file 2270 * polled once is likely to be polled again. 2271 */ 2272 static void fuse_register_polled_file(struct fuse_conn *fc, 2273 struct fuse_file *ff) 2274 { 2275 spin_lock(&fc->lock); 2276 if (RB_EMPTY_NODE(&ff->polled_node)) { 2277 struct rb_node **link, *parent; 2278 2279 link = fuse_find_polled_node(fc, ff->kh, &parent); 2280 BUG_ON(*link); 2281 rb_link_node(&ff->polled_node, parent, link); 2282 rb_insert_color(&ff->polled_node, &fc->polled_files); 2283 } 2284 spin_unlock(&fc->lock); 2285 } 2286 2287 unsigned fuse_file_poll(struct file *file, poll_table *wait) 2288 { 2289 struct fuse_file *ff = file->private_data; 2290 struct fuse_conn *fc = ff->fc; 2291 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; 2292 struct fuse_poll_out outarg; 2293 struct fuse_req *req; 2294 int err; 2295 2296 if (fc->no_poll) 2297 return DEFAULT_POLLMASK; 2298 2299 poll_wait(file, &ff->poll_wait, wait); 2300 inarg.events = (__u32)poll_requested_events(wait); 2301 2302 /* 2303 * Ask for notification iff there's someone waiting for it. 2304 * The client may ignore the flag and always notify. 2305 */ 2306 if (waitqueue_active(&ff->poll_wait)) { 2307 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; 2308 fuse_register_polled_file(fc, ff); 2309 } 2310 2311 req = fuse_get_req_nopages(fc); 2312 if (IS_ERR(req)) 2313 return POLLERR; 2314 2315 req->in.h.opcode = FUSE_POLL; 2316 req->in.h.nodeid = ff->nodeid; 2317 req->in.numargs = 1; 2318 req->in.args[0].size = sizeof(inarg); 2319 req->in.args[0].value = &inarg; 2320 req->out.numargs = 1; 2321 req->out.args[0].size = sizeof(outarg); 2322 req->out.args[0].value = &outarg; 2323 fuse_request_send(fc, req); 2324 err = req->out.h.error; 2325 fuse_put_request(fc, req); 2326 2327 if (!err) 2328 return outarg.revents; 2329 if (err == -ENOSYS) { 2330 fc->no_poll = 1; 2331 return DEFAULT_POLLMASK; 2332 } 2333 return POLLERR; 2334 } 2335 EXPORT_SYMBOL_GPL(fuse_file_poll); 2336 2337 /* 2338 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and 2339 * wakes up the poll waiters. 2340 */ 2341 int fuse_notify_poll_wakeup(struct fuse_conn *fc, 2342 struct fuse_notify_poll_wakeup_out *outarg) 2343 { 2344 u64 kh = outarg->kh; 2345 struct rb_node **link; 2346 2347 spin_lock(&fc->lock); 2348 2349 link = fuse_find_polled_node(fc, kh, NULL); 2350 if (*link) { 2351 struct fuse_file *ff; 2352 2353 ff = rb_entry(*link, struct fuse_file, polled_node); 2354 wake_up_interruptible_sync(&ff->poll_wait); 2355 } 2356 2357 spin_unlock(&fc->lock); 2358 return 0; 2359 } 2360 2361 static void fuse_do_truncate(struct file *file) 2362 { 2363 struct inode *inode = file->f_mapping->host; 2364 struct iattr attr; 2365 2366 attr.ia_valid = ATTR_SIZE; 2367 attr.ia_size = i_size_read(inode); 2368 2369 attr.ia_file = file; 2370 attr.ia_valid |= ATTR_FILE; 2371 2372 fuse_do_setattr(inode, &attr, file); 2373 } 2374 2375 static inline loff_t fuse_round_up(loff_t off) 2376 { 2377 return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); 2378 } 2379 2380 static ssize_t 2381 fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 2382 loff_t offset, unsigned long nr_segs) 2383 { 2384 ssize_t ret = 0; 2385 struct file *file = iocb->ki_filp; 2386 struct fuse_file *ff = file->private_data; 2387 bool async_dio = ff->fc->async_dio; 2388 loff_t pos = 0; 2389 struct inode *inode; 2390 loff_t i_size; 2391 size_t count = iov_length(iov, nr_segs); 2392 struct fuse_io_priv *io; 2393 2394 pos = offset; 2395 inode = file->f_mapping->host; 2396 i_size = i_size_read(inode); 2397 2398 /* optimization for short read */ 2399 if (async_dio && rw != WRITE && offset + count > i_size) { 2400 if (offset >= i_size) 2401 return 0; 2402 count = min_t(loff_t, count, fuse_round_up(i_size - offset)); 2403 } 2404 2405 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); 2406 if (!io) 2407 return -ENOMEM; 2408 spin_lock_init(&io->lock); 2409 io->reqs = 1; 2410 io->bytes = -1; 2411 io->size = 0; 2412 io->offset = offset; 2413 io->write = (rw == WRITE); 2414 io->err = 0; 2415 io->file = file; 2416 /* 2417 * By default, we want to optimize all I/Os with async request 2418 * submission to the client filesystem if supported. 2419 */ 2420 io->async = async_dio; 2421 io->iocb = iocb; 2422 2423 /* 2424 * We cannot asynchronously extend the size of a file. We have no method 2425 * to wait on real async I/O requests, so we must submit this request 2426 * synchronously. 2427 */ 2428 if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE) 2429 io->async = false; 2430 2431 if (rw == WRITE) 2432 ret = __fuse_direct_write(io, iov, nr_segs, &pos); 2433 else 2434 ret = __fuse_direct_read(io, iov, nr_segs, &pos, count); 2435 2436 if (io->async) { 2437 fuse_aio_complete(io, ret < 0 ? ret : 0, -1); 2438 2439 /* we have a non-extending, async request, so return */ 2440 if (!is_sync_kiocb(iocb)) 2441 return -EIOCBQUEUED; 2442 2443 ret = wait_on_sync_kiocb(iocb); 2444 } else { 2445 kfree(io); 2446 } 2447 2448 if (rw == WRITE) { 2449 if (ret > 0) 2450 fuse_write_update_size(inode, pos); 2451 else if (ret < 0 && offset + count > i_size) 2452 fuse_do_truncate(file); 2453 } 2454 2455 return ret; 2456 } 2457 2458 static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, 2459 loff_t length) 2460 { 2461 struct fuse_file *ff = file->private_data; 2462 struct inode *inode = file->f_inode; 2463 struct fuse_conn *fc = ff->fc; 2464 struct fuse_req *req; 2465 struct fuse_fallocate_in inarg = { 2466 .fh = ff->fh, 2467 .offset = offset, 2468 .length = length, 2469 .mode = mode 2470 }; 2471 int err; 2472 bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || 2473 (mode & FALLOC_FL_PUNCH_HOLE); 2474 2475 if (fc->no_fallocate) 2476 return -EOPNOTSUPP; 2477 2478 if (lock_inode) { 2479 mutex_lock(&inode->i_mutex); 2480 if (mode & FALLOC_FL_PUNCH_HOLE) 2481 fuse_set_nowrite(inode); 2482 } 2483 2484 req = fuse_get_req_nopages(fc); 2485 if (IS_ERR(req)) { 2486 err = PTR_ERR(req); 2487 goto out; 2488 } 2489 2490 req->in.h.opcode = FUSE_FALLOCATE; 2491 req->in.h.nodeid = ff->nodeid; 2492 req->in.numargs = 1; 2493 req->in.args[0].size = sizeof(inarg); 2494 req->in.args[0].value = &inarg; 2495 fuse_request_send(fc, req); 2496 err = req->out.h.error; 2497 if (err == -ENOSYS) { 2498 fc->no_fallocate = 1; 2499 err = -EOPNOTSUPP; 2500 } 2501 fuse_put_request(fc, req); 2502 2503 if (err) 2504 goto out; 2505 2506 /* we could have extended the file */ 2507 if (!(mode & FALLOC_FL_KEEP_SIZE)) 2508 fuse_write_update_size(inode, offset + length); 2509 2510 if (mode & FALLOC_FL_PUNCH_HOLE) 2511 truncate_pagecache_range(inode, offset, offset + length - 1); 2512 2513 fuse_invalidate_attr(inode); 2514 2515 out: 2516 if (lock_inode) { 2517 if (mode & FALLOC_FL_PUNCH_HOLE) 2518 fuse_release_nowrite(inode); 2519 mutex_unlock(&inode->i_mutex); 2520 } 2521 2522 return err; 2523 } 2524 2525 static const struct file_operations fuse_file_operations = { 2526 .llseek = fuse_file_llseek, 2527 .read = do_sync_read, 2528 .aio_read = fuse_file_aio_read, 2529 .write = do_sync_write, 2530 .aio_write = fuse_file_aio_write, 2531 .mmap = fuse_file_mmap, 2532 .open = fuse_open, 2533 .flush = fuse_flush, 2534 .release = fuse_release, 2535 .fsync = fuse_fsync, 2536 .lock = fuse_file_lock, 2537 .flock = fuse_file_flock, 2538 .splice_read = generic_file_splice_read, 2539 .unlocked_ioctl = fuse_file_ioctl, 2540 .compat_ioctl = fuse_file_compat_ioctl, 2541 .poll = fuse_file_poll, 2542 .fallocate = fuse_file_fallocate, 2543 }; 2544 2545 static const struct file_operations fuse_direct_io_file_operations = { 2546 .llseek = fuse_file_llseek, 2547 .read = fuse_direct_read, 2548 .write = fuse_direct_write, 2549 .mmap = fuse_direct_mmap, 2550 .open = fuse_open, 2551 .flush = fuse_flush, 2552 .release = fuse_release, 2553 .fsync = fuse_fsync, 2554 .lock = fuse_file_lock, 2555 .flock = fuse_file_flock, 2556 .unlocked_ioctl = fuse_file_ioctl, 2557 .compat_ioctl = fuse_file_compat_ioctl, 2558 .poll = fuse_file_poll, 2559 .fallocate = fuse_file_fallocate, 2560 /* no splice_read */ 2561 }; 2562 2563 static const struct address_space_operations fuse_file_aops = { 2564 .readpage = fuse_readpage, 2565 .writepage = fuse_writepage, 2566 .launder_page = fuse_launder_page, 2567 .readpages = fuse_readpages, 2568 .set_page_dirty = __set_page_dirty_nobuffers, 2569 .bmap = fuse_bmap, 2570 .direct_IO = fuse_direct_IO, 2571 }; 2572 2573 void fuse_init_file_inode(struct inode *inode) 2574 { 2575 inode->i_fop = &fuse_file_operations; 2576 inode->i_data.a_ops = &fuse_file_aops; 2577 } 2578