/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>

static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
				      struct fuse_page_desc **desc)
{
	struct page **pages;

	pages = kzalloc(npages * (sizeof(struct page *) +
				  sizeof(struct fuse_page_desc)), flags);
	*desc = (void *) (pages + npages);

	return pages;
}
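/*
 * Note: the page pointer array and the page descriptor array share a
 * single allocation; *desc points just past the last page pointer:
 *
 *	pages -> [ page 0 | ... | page npages-1 | desc 0 | ... | desc npages-1 ]
 *
 * so both are freed together with a single kfree() of the pages pointer.
 */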
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fc, &args);
}

struct fuse_release_args {
	struct fuse_args args;
	struct fuse_release_in inarg;
	struct inode *inode;
};

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->release_args = kzalloc(sizeof(*ff->release_args),
				   GFP_KERNEL_ACCOUNT);
	if (!ff->release_args) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	mutex_init(&ff->readdir.lock);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fc->khctr);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->release_args);
	mutex_destroy(&ff->readdir.lock);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
	kfree(ra);
}

static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_args *args = &ff->release_args->args;

		if (isdir ? ff->fc->no_opendir : ff->fc->no_open) {
			/* Do nothing when client does not implement 'open' */
			fuse_release_end(ff->fc, args, 0);
		} else if (sync) {
			fuse_simple_request(ff->fc, args);
			fuse_release_end(ff->fc, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fc, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fc, args, -ENOTCONN);
		}
		kfree(ff);
	}
}
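/*
 * Open a file: send FUSE_OPEN or FUSE_OPENDIR, unless the server has
 * already told us (via ENOSYS) that it does not implement the opcode,
 * in which case a default fuse_file with FOPEN_KEEP_CACHE is used.
 */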
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	ff->fh = 0;
	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (isdir ? !fc->no_opendir : !fc->no_open) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return err;
		} else {
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;
	file->private_data = ff;

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fi->lock);
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, 0);
		spin_unlock(&fi->lock);
		fuse_invalidate_attr(inode);
		if (fc->writeback_cache)
			file_update_time(file);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;
	bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
			      fc->atomic_o_trunc &&
			      fc->writeback_cache;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate) {
		inode_lock(inode);
		fuse_set_nowrite(inode);
	}

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);

	if (!err)
		fuse_finish_open(inode, file);

	if (is_wb_truncate) {
		fuse_release_nowrite(inode);
		inode_unlock(inode);
	}

	return err;
}

static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_release_args *ra = ff->release_args;

	/* Inode is NULL on error path of fuse_create_open() */
	if (likely(fi)) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;
}

void fuse_release_common(struct file *file, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(file_inode(file));
	struct fuse_file *ff = file->private_data;
	struct fuse_release_args *ra = ff->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, file->f_flags, opcode);

	if (ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fc,
							  (fl_owner_t) file);
	}
	/* Hold inode until release is finished */
	ra->inode = igrab(file_inode(file));

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy, isdir);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* see fuse_vma_close() for !writeback_cache case */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
	/*
	 * iput(NULL) is a no-op, and since the refcount is 1 and everything
	 * is synchronous, we are fine with not doing igrab() here.
	 */
	fuse_file_put(ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
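/*
 * This is the standard 32-round XTEA encipher: the pointer value forms
 * the 64-bit block (v0, v1), fc->scramble_key is the 128-bit key and
 * 0x9E3779B9 is the XTEA key schedule constant.
 */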
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct list_head writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
};

static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
					pgoff_t idx_from, pgoff_t idx_to)
{
	struct fuse_writepage_args *wpa;

	list_for_each_entry(wpa, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from < curr_index + wpa->ia.ap.num_pages &&
		    curr_index <= idx_to) {
			return wpa;
		}
	}
	return NULL;
}

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
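/*
 * Called by the VFS on each close() of the file.  Writes back cached
 * data and sends FUSE_FLUSH together with the lock owner id.  A server
 * answering ENOSYS is remembered in fc->no_flush and not asked again.
 */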
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_flush_in inarg;
	FUSE_ARGS(args);
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(file->f_mapping);
	if (err)
		return err;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	args.opcode = FUSE_FLUSH;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.force = true;

	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
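/*
 * Send FUSE_FSYNC or FUSE_FSYNCDIR.  With datasync set, the
 * FUSE_FSYNC_FDATASYNC flag tells the server that only the file
 * contents need to be synced, not the metadata.
 */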
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fc, &args);
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to the implementation of fuse writeback,
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes().
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}

void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		put_page(ap->pages[i]);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}
/**
 * In case of a short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res, 0);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int npages)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
						&ia->ap.descs);
		if (!ia->ap.pages) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}

static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
				ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}

static ssize_t fuse_async_req_send(struct fuse_conn *fc,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fc, &ia->ap.args, err);

	return num_bytes;
}

static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fc, ia, count);

	return fuse_simple_request(fc, &ia->ap.args);
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}
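/*
 * Handle a short read from the server, which means EOF was reached.
 * With writeback cache the cached size may be newer than the server's,
 * so zero the rest of the pages instead of truncating; without it,
 * shrink the cached i_size down to the end of the data actually read.
 */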
static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/*
		 * A hole in a file. Some data after the hole is in the page
		 * cache, but has not reached the client fs yet. So the hole
		 * is not present there.
		 */
		int i;
		int start_idx = num_read >> PAGE_SHIFT;
		size_t off = num_read & (PAGE_SIZE - 1);

		for (i = start_idx; i < ap->num_pages; i++) {
			zero_user_segment(ap->pages[i], off, PAGE_SIZE);
			off = 0;
		}
	} else {
		loff_t pos = page_offset(ap->pages[0]) + num_read;

		fuse_read_update_size(inode, pos, attr_ver);
	}
}

static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(page);
	struct fuse_page_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,
		.ap.num_pages = 1,
		.ap.pages = &page,
		.ap.descs = &desc,
	};
	ssize_t res;
	u64 attr_ver;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	attr_ver = fuse_get_attr_version(fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)
		desc.length--;

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fc, &ia.ap.args);
	if (res < 0)
		return res;
	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	SetPageUptodate(page);

	return 0;
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_args *args,
			       int err)
{
	int i;
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
		mapping = ap->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (!err)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		put_page(page);
	}
	if (ia->ff)
		fuse_file_put(ia->ff, false, false);

	fuse_io_free(ia);
}

static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fc, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fc, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fc, &ap->args, err);
}
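/*
 * Readahead batching state: fuse_readpages_fill() accumulates
 * contiguous pages into a single FUSE_READ and flushes the request
 * once it is full (fc->max_pages or fc->max_read) or the next page is
 * not contiguous with the previous one.
 */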
struct fuse_fill_data {
	struct fuse_io_args *ia;
	struct file *file;
	struct inode *inode;
	unsigned int nr_pages;
	unsigned int max_pages;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_io_args *ia = data->ia;
	struct fuse_args_pages *ap = &ia->ap;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (ap->num_pages &&
	    (ap->num_pages == fc->max_pages ||
	     (ap->num_pages + 1) * PAGE_SIZE > fc->max_read ||
	     ap->pages[ap->num_pages - 1]->index + 1 != page->index)) {
		data->max_pages = min_t(unsigned int, data->nr_pages,
					fc->max_pages);
		fuse_send_readpages(ia, data->file);
		data->ia = ia = fuse_io_alloc(NULL, data->max_pages);
		if (!ia) {
			unlock_page(page);
			return -ENOMEM;
		}
		ap = &ia->ap;
	}

	if (WARN_ON(ap->num_pages >= data->max_pages)) {
		unlock_page(page);
		fuse_io_free(ia);
		return -EIO;
	}

	get_page(page);
	ap->pages[ap->num_pages] = page;
	ap->descs[ap->num_pages].length = PAGE_SIZE;
	ap->num_pages++;
	data->nr_pages--;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.nr_pages = nr_pages;
	data.max_pages = min_t(unsigned int, nr_pages, fc->max_pages);
	data.ia = fuse_io_alloc(NULL, data.max_pages);
	err = -ENOMEM;
	if (!data.ia)
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.ia->ap.num_pages)
			fuse_send_readpages(data.ia, file);
		else
			fuse_io_free(data.ia);
	}
out:
	return err;
}

static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;

		err = fuse_update_attributes(inode, iocb->ki_filp);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}
static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}

static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb->ki_flags & IOCB_DSYNC)
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}

static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fc, ia, count);

	err = fuse_simple_request(fc, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}

bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	return ret;
}

static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	unsigned int offset, i;
	int err;

	for (i = 0; i < ap->num_pages; i++)
		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);

	err = fuse_simple_request(fc, &ap->args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (!err && !offset && count >= PAGE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_SIZE - offset)
			count -= PAGE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		put_page(page);
	}

	return err;
}
static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos,
				     unsigned int max_pages)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	size_t count = 0;
	int err;

	ap->args.in_pages = true;
	ap->descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		flush_dcache_page(page);

		iov_iter_advance(ii, tmp);
		if (!tmp) {
			unlock_page(page);
			put_page(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].length = tmp;
		ap->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 ap->num_pages < max_pages && offset == 0);

	return count > 0 ? count : err;
}
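/*
 * Number of page-cache pages touched by a write of 'len' bytes at
 * 'pos', clamped to 'max_pages'.  E.g. with 4K pages, a 10 byte write
 * at pos 4090 straddles a page boundary and thus returns 2.
 */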
static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
					 unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}

static ssize_t fuse_perform_write(struct kiocb *iocb,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	ssize_t res = 0;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		ssize_t count;
		struct fuse_io_args ia = {};
		struct fuse_args_pages *ap = &ia.ap;
		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
						      fc->max_pages);

		ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
		if (!ap->pages) {
			err = -ENOMEM;
			break;
		}

		count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
		if (count <= 0) {
			err = count;
		} else {
			err = fuse_send_write_pages(&ia, iocb, inode,
						    pos, count);
			if (!err) {
				size_t num_written = ia.write.out.size;

				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		kfree(ap->pages);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	loff_t endbyte = 0;

	if (get_fuse_conn(inode)->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file);
		if (err)
			return err;

		return generic_file_write_iter(iocb, from);
	}

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos = iocb->ki_pos;

		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;

		pos += written;

		written_buffered = fuse_perform_write(iocb, mapping, from, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_SHIFT,
					 endbyte >> PAGE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos);
		if (written >= 0)
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	inode_unlock(inode);
	if (written > 0)
		written = generic_write_sync(iocb, written);

	return written ? written : err;
}

static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
					       unsigned int index,
					       unsigned int nr_pages)
{
	int i;

	for (i = index; i < index + nr_pages; i++)
		descs[i].length = PAGE_SIZE - descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (iov_iter_is_kvec(ii)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			ap->args.in_args[1].value = (void *) user_addr;
		else
			ap->args.out_args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
		unsigned npages;
		size_t start;

		ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
					 *nbytesp - nbytes,
					 max_pages - ap->num_pages,
					 &start);
		if (ret < 0)
			break;

		iov_iter_advance(ii, ret);
		nbytes += ret;

		ret += start;
		npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;

		ap->descs[ap->num_pages].offset = start;
		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);

		ap->num_pages += npages;
		ap->descs[ap->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	if (write)
		ap->args.in_pages = true;
	else
		ap->args.out_pages = true;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}
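/*
 * Core of the FOPEN_DIRECT_IO read/write paths: split the iterator
 * into chunks of at most fc->max_write (or fc->max_read), pin the user
 * pages and send one FUSE request per chunk, synchronously or, with
 * io->async, in the background.  FUSE_DIO_WRITE selects the write
 * path; FUSE_DIO_CUSE skips the page writeback synchronization, which
 * only makes sense for regular files.
 */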
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	int err = 0;
	struct fuse_io_args *ia;
	unsigned int max_pages;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);
	if (!ia)
		return -ENOMEM;

	ia->io = io;
	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	io->should_dirty = !write && iter_is_iovec(iter);
	while (count) {
		ssize_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
					  max_pages);
		if (err && !nbytes)
			break;

		if (write) {
			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_PRIV;

			nres = fuse_send_write(ia, pos, nbytes, owner);
		} else {
			nres = fuse_send_read(ia, pos, nbytes, owner);
		}

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, io->should_dirty);
			fuse_io_free(ia);
		}
		ia = NULL;
		if (nres < 0) {
			iov_iter_revert(iter, nbytes);
			err = nres;
			break;
		}
		WARN_ON(nres > nbytes);

		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);
			break;
		}
		if (count) {
			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);
			if (!ia)
				break;
		}
	}
	if (ia)
		fuse_io_free(ia);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

	return res;
}

static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t res;

	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, to);
	} else {
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
	}

	return res;
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t res;

	/* Don't allow parallel writes to the same file */
	inode_lock(inode);
	res = generic_write_checks(iocb, from);
	if (res > 0) {
		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
			res = fuse_direct_IO(iocb, from);
		} else {
			res = fuse_direct_io(&io, from, &iocb->ki_pos,
					     FUSE_DIO_WRITE);
		}
	}
	fuse_invalidate_attr(inode);
	if (res > 0)
		fuse_write_update_size(inode, iocb->ki_pos);
	inode_unlock(inode);

	return res;
}

static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;

	if (is_bad_inode(file_inode(file)))
		return -EIO;

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_read_iter(iocb, to);
	else
		return fuse_direct_read_iter(iocb, to);
}

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;

	if (is_bad_inode(file_inode(file)))
		return -EIO;

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_write_iter(iocb, from);
	else
		return fuse_direct_write_iter(iocb, from);
}
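/*
 * Writeback machinery.  Each dirty page is copied into a newly
 * allocated temporary page (accounted as NR_WRITEBACK_TEMP) and the
 * FUSE_WRITE request is built from the copies, so writeback never has
 * to wait on the userspace server; the original page is marked clean
 * as soon as the copy is queued.  fi->writepages tracks requests that
 * are still in flight.
 */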
static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	int i;

	for (i = 0; i < ap->num_pages; i++)
		__free_page(ap->pages[i]);

	if (wpa->ia.ff)
		fuse_file_put(wpa->ia.ff, false, false);

	kfree(ap->pages);
	kfree(wpa);
}

static void fuse_writepage_finish(struct fuse_conn *fc,
				  struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	list_del(&wpa->writepages_entry);
	for (i = 0; i < ap->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}

/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc,
				struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_writepage_args *aux, *next;
	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
	struct fuse_write_in *inarg = &wpa->ia.write.in;
	struct fuse_args *args = &wpa->ia.ap.args;
	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
	int err;

	fi->writectr++;
	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	args->in_args[1].size = inarg->size;
	args->force = true;
	args->nocreds = true;

	err = fuse_simple_background(fc, args, GFP_ATOMIC);
	if (err == -ENOMEM) {
		spin_unlock(&fi->lock);
		err = fuse_simple_background(fc, args, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&fi->lock);
	}

	/* Fails on broken connection only */
	if (unlikely(err))
		goto out_free;

	return;

out_free:
	fi->writectr--;
	fuse_writepage_finish(fc, wpa);
	spin_unlock(&fi->lock);

	/* After fuse_writepage_finish() aux request list is private */
	for (aux = wpa->next; aux; aux = next) {
		next = aux->next;
		aux->next = NULL;
		fuse_writepage_free(aux);
	}

	fuse_writepage_free(wpa);
	spin_lock(&fi->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fi->lock held.
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t crop = i_size_read(inode);
	struct fuse_writepage_args *wpa;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		wpa = list_entry(fi->queued_writes.next,
				 struct fuse_writepage_args, queue_entry);
		list_del_init(&wpa->queue_entry);
		fuse_send_writepage(fc, wpa, crop);
	}
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
			       int error)
{
	struct fuse_writepage_args *wpa =
		container_of(args, typeof(*wpa), ia.ap.args);
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, error);
	spin_lock(&fi->lock);
	while (wpa->next) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_write_in *inarg = &wpa->ia.write.in;
		struct fuse_writepage_args *next = wpa->next;

		wpa->next = next->next;
		next->next = NULL;
		next->ia.ff = fuse_file_get(wpa->ia.ff);
		list_add(&next->writepages_entry, &fi->writepages);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fc, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fc, wpa);
	spin_unlock(&fi->lock);
	fuse_writepage_free(wpa);
}

static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
					       struct fuse_inode *fi)
{
	struct fuse_file *ff = NULL;

	spin_lock(&fi->lock);
	if (!list_empty(&fi->write_files)) {
		ff = list_entry(fi->write_files.next, struct fuse_file,
				write_entry);
		fuse_file_get(ff);
	}
	spin_unlock(&fi->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
					     struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fc, fi);

	WARN_ON(!ff);
	return ff;
}

int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	ff = __fuse_write_file_get(fc, fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, false, false);

	return err;
}

static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
{
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;

	wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
	if (wpa) {
		ap = &wpa->ia.ap;
		ap->num_pages = 0;
		ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
		if (!ap->pages) {
			kfree(wpa);
			wpa = NULL;
		}
	}
	return wpa;
}

static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	wpa = fuse_writepage_args_alloc();
	if (!wpa)
		goto err;
	ap = &wpa->ia.ap;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	error = -EIO;
	wpa->ia.ff = fuse_write_file_get(fc, fi);
	if (!wpa->ia.ff)
		goto err_nofile;

	fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
	wpa->next = NULL;
	ap->args.in_pages = true;
	ap->num_pages = 1;
	ap->pages[0] = tmp_page;
	ap->descs[0].offset = 0;
	ap->descs[0].length = PAGE_SIZE;
	ap->args.end = fuse_writepage_end;
	wpa->inode = inode;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fi->lock);
	list_add(&wpa->writepages_entry, &fi->writepages);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	end_page_writeback(page);

	return 0;

err_nofile:
	__free_page(tmp_page);
err_free:
	kfree(wpa);
err:
	mapping_set_error(page->mapping, error);
	end_page_writeback(page);
	return error;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.  We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

struct fuse_fill_wb_data {
	struct fuse_writepage_args *wpa;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
	unsigned int max_pages;
};

static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
{
	struct fuse_args_pages *ap = &data->wpa->ia.ap;
	struct fuse_conn *fc = get_fuse_conn(data->inode);
	struct page **pages;
	struct fuse_page_desc *descs;
	unsigned int npages = min_t(unsigned int,
				    max_t(unsigned int, data->max_pages * 2,
					  FUSE_DEFAULT_MAX_PAGES_PER_REQ),
				    fc->max_pages);
	WARN_ON(npages <= data->max_pages);

	pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
	if (!pages)
		return false;

	memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
	memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
	kfree(ap->pages);
	ap->pages = pages;
	ap->descs = descs;
	data->max_pages = npages;

	return true;
}

static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_writepage_args *wpa = data->wpa;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = wpa->ia.ap.num_pages;
	int i;

	wpa->ia.ff = fuse_file_get(data->ff);
	spin_lock(&fi->lock);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}

/*
 * First recheck under fi->lock if the offending offset is still under
 * writeback.  If yes, then iterate auxiliary write requests, to see if there's
 * one already added for a page at this offset.  If there's none, then insert
 * this new request onto the auxiliary list, otherwise reuse the existing one
 * by copying the new page contents over to the old temporary page.
 */
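/*
 * Returns true if the page was dealt with here (merged into an
 * existing temporary page or chained as an auxiliary request), false
 * if no in-flight request covers the page and the caller keeps
 * ownership of the new one.
 */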
static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
				     struct page *page)
{
	struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
	struct fuse_writepage_args *tmp;
	struct fuse_writepage_args *old_wpa;
	struct fuse_args_pages *new_ap = &new_wpa->ia.ap;

	WARN_ON(new_ap->num_pages != 0);

	spin_lock(&fi->lock);
	list_del(&new_wpa->writepages_entry);
	old_wpa = fuse_find_writeback(fi, page->index, page->index);
	if (!old_wpa) {
		list_add(&new_wpa->writepages_entry, &fi->writepages);
		spin_unlock(&fi->lock);
		return false;
	}

	new_ap->num_pages = 1;
	for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
		pgoff_t curr_index;

		WARN_ON(tmp->inode != new_wpa->inode);
		curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
		if (curr_index == page->index) {
			WARN_ON(tmp->ia.ap.num_pages != 1);
			swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
			break;
		}
	}

	if (!tmp) {
		new_wpa->next = old_wpa->next;
		old_wpa->next = new_wpa;
	}

	spin_unlock(&fi->lock);

	if (tmp) {
		struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(new_wpa);
	}

	return true;
}
static int fuse_writepages_fill(struct page *page,
		struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_writepage_args *wpa = data->wpa;
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	bool is_writeback;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fc, fi);
		if (!data->ff)
			goto out_unlock;
	}

	/*
	 * Being under writeback is unlikely but possible.  For example a
	 * direct read to an mmaped fuse file will set the page dirty twice;
	 * once when the pages are faulted with get_user_pages(), and then
	 * again after the read has completed.
	 */
	is_writeback = fuse_page_is_writeback(inode, page->index);

	if (wpa && ap->num_pages &&
	    (is_writeback || ap->num_pages == fc->max_pages ||
	     (ap->num_pages + 1) * PAGE_SIZE > fc->max_write ||
	     data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) {
		fuse_writepages_send(data);
		data->wpa = NULL;
	} else if (wpa && ap->num_pages == data->max_pages) {
		if (!fuse_pages_realloc(data)) {
			fuse_writepages_send(data);
			data->wpa = NULL;
		}
	}

	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * here, since clear_page_dirty_for_io(), and keep it held until we
	 * add the request to the fi->writepages list and increment
	 * ap->num_pages.  After this fuse_page_is_writeback() will indicate
	 * that the page is under writeback, so we can release the page lock.
	 */
	if (data->wpa == NULL) {
		err = -ENOMEM;
		wpa = fuse_writepage_args_alloc();
		if (!wpa) {
			__free_page(tmp_page);
			goto out_unlock;
		}
		data->max_pages = 1;

		ap = &wpa->ia.ap;
		fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
		wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
		wpa->next = NULL;
		ap->args.in_pages = true;
		ap->args.end = fuse_writepage_end;
		ap->num_pages = 0;
		wpa->inode = inode;

		spin_lock(&fi->lock);
		list_add(&wpa->writepages_entry, &fi->writepages);
		spin_unlock(&fi->lock);

		data->wpa = wpa;
	}
	set_page_writeback(page);

	copy_highpage(tmp_page, page);
	ap->pages[ap->num_pages] = tmp_page;
	ap->descs[ap->num_pages].offset = 0;
	ap->descs[ap->num_pages].length = PAGE_SIZE;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (is_writeback && fuse_writepage_in_flight(wpa, page)) {
		end_page_writeback(page);
		data->wpa = NULL;
		goto out_unlock;
	}
	data->orig_pages[ap->num_pages] = page;

	/*
	 * Protected by fi->lock against concurrent access by
	 * fuse_page_is_writeback().
	 */
	spin_lock(&fi->lock);
	ap->num_pages++;
	spin_unlock(&fi->lock);

out_unlock:
	unlock_page(page);

	return err;
}

static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.inode = inode;
	data.wpa = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(fc->max_pages,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.wpa) {
		/* Ignore errors if we can write at least one page */
		WARN_ON(!data.wpa->ia.ap.num_pages);
		fuse_writepages_send(&data);
		err = 0;
	}
	if (data.ff)
		fuse_file_put(data.ff, false, false);

	kfree(data.orig_pages);
out:
	return err;
}

/*
 * It would be worthwhile to make sure that space is reserved on disk for
 * the write, but how to implement it without killing performance needs
 * more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file, in
	 * which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	put_page(page);
error:
	return err;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
	if (!copied)
		goto unlock;

	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = (pos + copied) & ~PAGE_MASK;
		if (endoff)
			zero_user_segment(page, endoff, PAGE_SIZE);
		SetPageUptodate(page);
	}

	fuse_write_update_size(inode, pos + copied);
	set_page_dirty(page);

unlock:
	unlock_page(page);
	put_page(page);

	return copied;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise an unprivileged userspace fs would be able to block
 * unrelated operations:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);

	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct fuse_file *ff = file->private_data;

	if (ff->open_flags & FOPEN_DIRECT_IO) {
		/* Can't provide the coherency needed for MAP_SHARED */
		if (vma->vm_flags & VM_MAYSHARE)
			return -ENODEV;

		invalidate_inode_pages2(file->f_mapping);

		return generic_file_mmap(file, vma);
	}

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
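
/*
 * Editor's note: translate a fuse_file_lock reply from the server into
 * the kernel's struct file_lock.  The lock range is sanity checked, and
 * the pid reported by the server is resolved in the connection's pid
 * namespace before being converted below.
 */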
static int convert_fuse_file_lock(struct fuse_conn *fc,
				  const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;

		/*
		 * Convert pid into init's pid namespace.  The locks API will
		 * translate it into the caller's pid namespace.
		 */
		rcu_read_lock();
		fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns),
				       &init_pid_ns);
		rcu_read_unlock();
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->opcode = opcode;
	args->nodeid = get_node_id(inode);
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(*inarg);
	args->in_args[0].value = inarg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (!err)
		err = convert_fuse_file_lock(fc, &outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
	pid_t pid_nr = pid_nr_ns(pid, fc->pid_ns);
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
		return 0;

	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
	err = fuse_simple_request(fc, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = locks_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.opcode = FUSE_BMAP;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
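
/*
 * Editor's note: SEEK_HOLE/SEEK_DATA is forwarded to the server as a
 * FUSE_LSEEK request.  A server that does not implement it (-ENOSYS) is
 * remembered on the connection, and all further calls fall back to
 * refreshing the attributes and using generic_file_llseek().
 */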
static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence
	};
	struct fuse_lseek_out outarg;
	int err;

	if (fc->no_lseek)
		goto fallback;

	args.opcode = FUSE_LSEEK;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err) {
		if (err == -ENOSYS) {
			fc->no_lseek = 1;
			goto fallback;
		}
		return err;
	}

	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);

fallback:
	err = fuse_update_attributes(inode, file);
	if (!err)
		return generic_file_llseek(file, offset, whence);
	else
		return err;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, file);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}

/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
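
/*
 * Editor's note on the disambiguation below: on a 64bit kernel a
 * struct compat_iovec (two 32bit fields) is half the size of a struct
 * iovec (two 64bit fields), so for any nonzero iovec count the two
 * reply layouts have different total sizes and can be told apart.
 */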
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
				     size_t transferred, unsigned count,
				     bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}

/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct fuse_conn *fc, struct iovec *iov,
				 size_t count)
{
	size_t n;
	u32 max = fc->max_pages << PAGE_SHIFT;

	for (n = 0; n < count; n++, iov++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}

static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}


/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on that invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and the iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
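
/*
 * Illustrative server-side sketch (editor's addition): with libfuse's
 * low-level API, the first round of the retry protocol for the example
 * above could look roughly like this; the exact handler signature is
 * quoted from memory and should be checked against the libfuse headers.
 *
 *	static void my_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned cmd,
 *			     void *arg, struct fuse_file_info *fi,
 *			     unsigned flags, const void *in_buf,
 *			     size_t in_bufsz, size_t out_bufsz)
 *	{
 *		if (!in_bufsz) {
 *			// ask the kernel to copy in struct a and retry
 *			struct iovec iov = { arg, sizeof(struct a) };
 *
 *			fuse_reply_ioctl_retry(req, &iov, 1, NULL, 0);
 *			return;
 *		}
 *		// in_buf now holds struct a; request a.buf next, or
 *		// finish with fuse_reply_ioctl()
 *	}
 */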
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, max_pages;
	size_t in_size, out_size, c;
	ssize_t transferred;
	int err, i;
	struct iov_iter ii;
	struct fuse_args_pages ap = {};

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT) {
		inarg.flags |= FUSE_IOCTL_32BIT;
#ifdef CONFIG_X86_X32
		if (in_x32_syscall())
			inarg.flags |= FUSE_IOCTL_COMPAT_X32;
#endif
	}
#endif

	/* assume all the iovs returned by client always fit in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	ap.pages = fuse_pages_alloc(fc->max_pages, GFP_KERNEL, &ap.descs);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!ap.pages || !iov_page)
		goto out;

	fuse_page_descs_length_init(ap.descs, 0, fc->max_pages);

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > fc->max_pages)
		goto out;
	while (ap.num_pages < max_pages) {
		ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!ap.pages[ap.num_pages])
			goto out;
		ap.num_pages++;
	}

	/* okay, let's send it to the client */
	ap.args.opcode = FUSE_IOCTL;
	ap.args.nodeid = ff->nodeid;
	ap.args.in_numargs = 1;
	ap.args.in_args[0].size = sizeof(inarg);
	ap.args.in_args[0].value = &inarg;
	if (in_size) {
		ap.args.in_numargs++;
		ap.args.in_args[1].size = in_size;
		ap.args.in_pages = true;

		err = -EFAULT;
		iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size);
		for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
			c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
			if (c != PAGE_SIZE && iov_iter_count(&ii))
				goto out;
		}
	}

	ap.args.out_numargs = 2;
	ap.args.out_args[0].size = sizeof(outarg);
	ap.args.out_args[0].value = &outarg;
	ap.args.out_args[1].size = out_size;
	ap.args.out_pages = true;
	ap.args.out_argvar = true;

	transferred = fuse_simple_request(fc, &ap.args);
	err = transferred;
	if (transferred < 0)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure the iovec counts are within bounds; the
		 * separate checks protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(ap.pages[0]);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(fc, in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(fc, out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = -EFAULT;
	iov_iter_init(&ii, READ, out_iov, out_iovs, transferred);
	for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
		c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
		if (c != PAGE_SIZE && iov_iter_count(&ii))
			goto out;
	}
	err = 0;
out:
	free_page((unsigned long) iov_page);
	while (ap.num_pages)
		__free_page(ap.pages[--ap.num_pages]);
	kfree(ap.pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);

long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to the RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *uninitialized_var(parent);

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}
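
/*
 * Editor's summary of the poll round trip: fuse_file_poll() sends
 * FUSE_POLL carrying the file's kh handle; if a waiter exists it also
 * sets FUSE_POLL_SCHEDULE_NOTIFY and registers the file above.  When
 * the server later has new events it emits a FUSE_NOTIFY_POLL
 * notification with the same kh, which fuse_notify_poll_wakeup() maps
 * back to the fuse_file and wakes its poll_wait queue.
 */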
__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = mangle_poll(poll_requested_events(wait));

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	args.opcode = FUSE_POLL;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);

	if (!err)
		return demangle_poll(outarg.revents);
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return EPOLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(file_dentry(file), &attr, file);
}

static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
	return round_up(off, fc->max_pages << PAGE_SHIFT);
}
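
/*
 * Editor's note: ->direct_IO() entry for O_DIRECT reads and writes.
 * When the connection supports async direct I/O the requests are
 * submitted asynchronously; sync iocbs and size-extending writes fall
 * back to blocking behavior, as explained inline below.
 */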
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	bool async_dio = ff->fc->async_dio;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset > i_size))
		return 0;

	/* optimization for short read */
	if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
		if (offset >= i_size)
			return 0;
		iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
		count = iov_iter_count(iter);
	}

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = async_dio;
	io->iocb = iocb;
	io->blocking = is_sync_kiocb(iocb);

	/*
	 * We cannot asynchronously extend the size of a file.
	 * In that case the aio will behave exactly like sync io.
	 */
	if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE)
		io->blocking = true;

	if (io->async && io->blocking) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr(inode);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}

	if (io->async) {
		bool blocking = io->blocking;

		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!blocking)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		if (ret > 0)
			fuse_write_update_size(inode, pos);
		else if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}

static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);

	if (!err)
		fuse_sync_writes(inode);

	return err;
}

static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = ff->fc;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
			  (mode & FALLOC_FL_PUNCH_HOLE);

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (fc->no_fallocate)
		return -EOPNOTSUPP;

	if (lock_inode) {
		inode_lock(inode);
		if (mode & FALLOC_FL_PUNCH_HOLE) {
			loff_t endbyte = offset + length - 1;

			err = fuse_writeback_range(inode, offset, endbyte);
			if (err)
				goto out;
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + length > i_size_read(inode)) {
		err = inode_newsize_ok(inode, offset + length);
		if (err)
			goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.opcode = FUSE_FALLOCATE;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		bool changed = fuse_write_update_size(inode, offset + length);

		if (changed && fc->writeback_cache)
			file_update_time(file);
	}

	if (mode & FALLOC_FL_PUNCH_HOLE)
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr(inode);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (lock_inode)
		inode_unlock(inode);

	return err;
}
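
/*
 * Editor's note: server-side copy via FUSE_COPY_FILE_RANGE.  Source and
 * destination must be on the same fuse filesystem (-EXDEV otherwise);
 * with the writeback cache enabled the affected ranges are flushed
 * first, and a server without the opcode (-ENOSYS) is remembered so
 * later calls fail fast with -EOPNOTSUPP.
 */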
static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
				      struct file *file_out, loff_t pos_out,
				      size_t len, unsigned int flags)
{
	struct fuse_file *ff_in = file_in->private_data;
	struct fuse_file *ff_out = file_out->private_data;
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct fuse_inode *fi_out = get_fuse_inode(inode_out);
	struct fuse_conn *fc = ff_in->fc;
	FUSE_ARGS(args);
	struct fuse_copy_file_range_in inarg = {
		.fh_in = ff_in->fh,
		.off_in = pos_in,
		.nodeid_out = ff_out->nodeid,
		.fh_out = ff_out->fh,
		.off_out = pos_out,
		.len = len,
		.flags = flags
	};
	struct fuse_write_out outarg;
	ssize_t err;
	/*
	 * Mark unstable when write-back is not used, and file_out gets
	 * extended.
	 */
	bool is_unstable = (!fc->writeback_cache) &&
			   ((pos_out + len) > inode_out->i_size);

	if (fc->no_copy_file_range)
		return -EOPNOTSUPP;

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	if (fc->writeback_cache) {
		inode_lock(inode_in);
		err = fuse_writeback_range(inode_in, pos_in, pos_in + len);
		inode_unlock(inode_in);
		if (err)
			return err;
	}

	inode_lock(inode_out);

	err = file_modified(file_out);
	if (err)
		goto out;

	if (fc->writeback_cache) {
		err = fuse_writeback_range(inode_out, pos_out, pos_out + len);
		if (err)
			goto out;
	}

	if (is_unstable)
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	args.opcode = FUSE_COPY_FILE_RANGE;
	args.nodeid = ff_in->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		fc->no_copy_file_range = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	if (fc->writeback_cache) {
		fuse_write_update_size(inode_out, pos_out + outarg.size);
		file_update_time(file_out);
	}

	fuse_invalidate_attr(inode_out);

	err = outarg.size;
out:
	if (is_unstable)
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	inode_unlock(inode_out);
	file_accessed(file_in);

	return err;
}

static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	.copy_file_range = fuse_copy_file_range,
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};

void fuse_init_file_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;

	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	fi->writectr = 0;
	init_waitqueue_head(&fi->page_waitq);
	INIT_LIST_HEAD(&fi->writepages);
}