/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	args.in.h.opcode = opcode;
	args.in.h.nodeid = nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(*outargp);
	args.out.args[0].value = outargp;

	return fuse_simple_request(fc, &args);
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc(0);
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	iput(req->misc.release.inode);
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (ff->fc->no_open) {
			/*
			 * Drop the release request when client does not
			 * implement 'open'
			 */
			__clear_bit(FR_BACKGROUND, &req->flags);
			iput(req->misc.release.inode);
			fuse_put_request(ff->fc, req);
		} else if (sync) {
			__clear_bit(FR_BACKGROUND, &req->flags);
			fuse_request_send(ff->fc, req);
			iput(req->misc.release.inode);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			__set_bit(FR_BACKGROUND, &req->flags);
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}
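
/*
 * Open a file or directory: allocate a fuse_file, send FUSE_OPEN or
 * FUSE_OPENDIR to get a file handle and open flags, and install the
 * fuse_file in file->private_data.  If the server has previously
 * returned ENOSYS for FUSE_OPEN (fc->no_open), regular files are
 * opened locally with default flags and no request is sent.
 */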
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	ff->fh = 0;
	ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
	if (!fc->no_open || isdir) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;

		} else if (err != -ENOSYS || isdir) {
			fuse_file_free(ff);
			return err;
		} else {
			fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fc->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fc->lock);
}

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
		if (fc->writeback_cache)
			file_update_time(file);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;
	bool lock_inode = (file->f_flags & O_TRUNC) &&
			  fc->atomic_o_trunc &&
			  fc->writeback_cache;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (lock_inode)
		inode_lock(inode);

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);

	if (!err)
		fuse_finish_open(inode, file);

	if (lock_inode)
		inode_unlock(inode);

	return err;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}
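
/*
 * Common part of release for files and directories: fill in the
 * RELEASE request from the reserved request, ask for a flock unlock
 * if needed, pin the inode until the release finishes, and drop the
 * fuse_file reference, which sends the request once all other
 * references are gone.
 */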
void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold inode until release is finished */
	req->misc.release.inode = igrab(file_inode(file));

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* see fuse_vma_close() for !writeback_cache case */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	__set_bit(FR_FORCE, &ff->reserved_req->flags);
	__clear_bit(FR_BACKGROUND, &ff->reserved_req->flags);
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
		if (idx_from < curr_index + req->num_pages &&
		    curr_index <= idx_to) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
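/*
 * The wait below is ended by fuse_writepage_finish(), which removes
 * the request from fi->writepages and wakes fi->page_waitq.
 */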
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	req = fuse_get_req_nofail_nopages(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__set_bit(FR_FORCE, &req->flags);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);
	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		goto out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	args.in.h.nodeid = get_node_id(inode);
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);
	return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}

/**
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	bool is_sync = is_sync_kiocb(io->iocb);
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && is_sync)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !is_sync) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fc->lock);
			fi->attr_version = ++fc->attr_version;
			spin_unlock(&fc->lock);
		}

		io->iocb->ki_complete(io->iocb, res, 0);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_io_priv *io = req->io;
	ssize_t pos = -1;

	fuse_release_user_pages(req, !io->write);

	if (io->write) {
		if (req->misc.write.in.size != req->misc.write.out.size)
			pos = req->misc.write.in.offset - io->offset +
				req->misc.write.out.size;
	} else {
		if (req->misc.read.in.size != req->out.args[0].size)
			pos = req->misc.read.in.offset - io->offset +
				req->out.args[0].size;
	}

	fuse_aio_complete(io, req->out.h.error, pos);
}

static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
		size_t num_bytes, struct fuse_io_priv *io)
{
	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	req->io = io;
	req->end = fuse_aio_complete_req;

	__fuse_get_request(req);
	fuse_request_send_background(fc, req);

	return num_bytes;
}

static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->file;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static void fuse_short_read(struct fuse_req *req, struct inode *inode,
			    u64 attr_ver)
{
	size_t num_read = req->out.args[0].size;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/*
		 * A hole in a file. Some data after the hole are in page cache,
		 * but have not reached the client fs yet. So, the hole is not
		 * present there.
		 */
		int i;
		int start_idx = num_read >> PAGE_SHIFT;
		size_t off = num_read & (PAGE_SIZE - 1);

		for (i = start_idx; i < req->num_pages; i++) {
			zero_user_segment(req->pages[i], off, PAGE_SIZE);
			off = 0;
		}
	} else {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}

static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_SIZE;
	u64 attr_ver;
	int err;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = count;
	num_read = fuse_send_read(req, &io, pos, count, NULL);
	err = req->out.h.error;

	if (!err) {
		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_short_read(req, inode, attr_ver);

		SetPageUptodate(page);
	}

	fuse_put_request(fc, req);

	return err;
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF.
		 * If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count)
			fuse_short_read(req, inode, req->misc.read.attr_ver);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		put_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
	unsigned nr_pages;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		int nr_alloc = min_t(unsigned, data->nr_pages,
				     FUSE_MAX_PAGES_PER_REQ);
		fuse_send_readpages(req, data->file);
		if (fc->async_read)
			req = fuse_get_req_for_background(fc, nr_alloc);
		else
			req = fuse_get_req(fc, nr_alloc);

		data->req = req;
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}

	if (WARN_ON(req->num_pages >= req->max_pages)) {
		fuse_put_request(fc, req);
		return -EIO;
	}

	get_page(page);
	req->pages[req->num_pages] = page;
	req->page_descs[req->num_pages].length = PAGE_SIZE;
	req->num_pages++;
	data->nr_pages--;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;
	int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	if (fc->async_read)
		data.req = fuse_get_req_for_background(fc, nr_alloc);
	else
		data.req = fuse_get_req(fc, nr_alloc);
	data.nr_pages = nr_pages;
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}
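
/*
 * Cached read path.  Attributes are refreshed first if the connection
 * uses automatic data invalidation or if the read goes past the
 * cached i_size, so that generic_file_read_iter() sees an up to date
 * file size.
 */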
static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->file;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fc->lock);

	return ret;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, &io, pos, count, NULL);

	offset = req->page_descs[0].offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_SIZE - offset)
			count -= PAGE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		put_page(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
		struct address_space *mapping,
		struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
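	/* only the first page of a request may start at a non-zero offset */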
	req->page_descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		flush_dcache_page(page);

		iov_iter_advance(ii, tmp);
		if (!tmp) {
			unlock_page(page);
			put_page(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = tmp;
		req->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < req->max_pages && offset == 0);

	return count > 0 ? count : err;
}

static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
	return min_t(unsigned,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     FUSE_MAX_PAGES_PER_REQ);
}

static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		struct fuse_req *req;
		ssize_t count;
		unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

		req = fuse_get_req(fc, nr_pages);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}
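
/*
 * Write path used when writeback caching is disabled: data is copied
 * into page cache pages and sent to the server right away with
 * fuse_perform_write() instead of being dirtied.  With writeback
 * caching enabled the generic page cache write path is used.  For
 * O_DIRECT, generic_file_direct_write() is used first and any
 * remainder is written through fuse_perform_write().
 */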
static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	loff_t endbyte = 0;

	if (get_fuse_conn(inode)->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, NULL, file, NULL);
		if (err)
			return err;

		return generic_file_write_iter(iocb, from);
	}

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos = iocb->ki_pos;
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;

		pos += written;

		written_buffered = fuse_perform_write(file, mapping, from, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_SHIFT,
					 endbyte >> PAGE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		written = fuse_perform_write(file, mapping, from, iocb->ki_pos);
		if (written >= 0)
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	inode_unlock(inode);

	return written ? written : err;
}

static inline void fuse_page_descs_length_init(struct fuse_req *req,
		unsigned index, unsigned nr_pages)
{
	int i;

	for (i = index; i < index + nr_pages; i++)
		req->page_descs[i].length = PAGE_SIZE -
			req->page_descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
			       size_t *nbytesp, int write)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (ii->type & ITER_KVEC) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
		unsigned npages;
		size_t start;
		ret = iov_iter_get_pages(ii, &req->pages[req->num_pages],
					*nbytesp - nbytes,
					req->max_pages - req->num_pages,
					&start);
		if (ret < 0)
			break;

		iov_iter_advance(ii, ret);
		nbytes += ret;

		ret += start;
		npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;

		req->page_descs[req->num_pages].offset = start;
		fuse_page_descs_length_init(req, req->num_pages, npages);

		req->num_pages += npages;
		req->page_descs[req->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}

static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
	return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ);
}
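
/*
 * Do a direct read or write: the iterator is split into requests of
 * at most max_read/max_write bytes, user pages are pinned into each
 * request with fuse_get_user_pages() and released again when the
 * request completes.  Callers pass FUSE_DIO_CUSE to skip the
 * synchronization against page cache writeback.
 */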
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->file;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	struct fuse_req *req;
	int err = 0;

	if (io->async)
		req = fuse_get_req_for_background(fc, fuse_iter_npages(iter));
	else
		req = fuse_get_req(fc, fuse_iter_npages(iter));
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		err = fuse_get_user_pages(req, iter, &nbytes, write);
		if (err && !nbytes)
			break;

		if (write)
			nres = fuse_send_write(req, io, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, io, pos, nbytes, owner);

		if (!io->async)
			fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			err = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = 0;
			err = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			if (io->async)
				req = fuse_get_req_for_background(fc,
					fuse_iter_npages(iter));
			else
				req = fuse_get_req(fc, fuse_iter_npages(iter));
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct file *file = io->file;
	struct inode *inode = file_inode(file);

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
	return __fuse_direct_read(&io, to, &iocb->ki_pos);
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	inode_lock(inode);
	res = generic_write_checks(iocb, from);
	if (res > 0)
		res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
	fuse_invalidate_attr(inode);
	if (res > 0)
		fuse_write_update_size(inode, iocb->ki_pos);
	inode_unlock(inode);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;

	for (i = 0; i < req->num_pages; i++)
		__free_page(req->pages[i]);

	if (req->ff)
		fuse_file_put(req->ff, false);
}
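
/*
 * Called when a writepage request has been completed (or dropped):
 * remove it from fi->writepages, undo the writeback accounting of its
 * temporary pages and wake waiters in fuse_wait_on_page_writeback().
 */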
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	list_del(&req->writepages_entry);
	for (i = 0; i < req->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
				loff_t size)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	__u64 data_size = req->num_pages * PAGE_SIZE;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	size_t crop = i_size_read(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req, crop);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	while (req->misc.write.next) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_write_in *inarg = &req->misc.write.in;
		struct fuse_req *next = req->misc.write.next;
		req->misc.write.next = next->misc.write.next;
		next->misc.write.next = NULL;
		next->ff = fuse_file_get(req->ff);
		list_add(&next->writepages_entry, &fi->writepages);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.
		 * Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fc, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
					       struct fuse_inode *fi)
{
	struct fuse_file *ff = NULL;

	spin_lock(&fc->lock);
	if (!list_empty(&fi->write_files)) {
		ff = list_entry(fi->write_files.next, struct fuse_file,
				write_entry);
		fuse_file_get(ff);
	}
	spin_unlock(&fc->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
					     struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fc, fi);
	WARN_ON(!ff);
	return ff;
}

int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	ff = __fuse_write_file_get(fc, fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, 0);

	return err;
}

static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs(1);
	if (!req)
		goto err;

	/* writeback always goes to bg_queue */
	__set_bit(FR_BACKGROUND, &req->flags);
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	error = -EIO;
	req->ff = fuse_write_file_get(fc, fi);
	if (!req->ff)
		goto err_nofile;

	fuse_write_fill(req, req->ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->misc.write.next = NULL;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_descs[0].offset = 0;
	req->page_descs[0].length = PAGE_SIZE;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	end_page_writeback(page);

	return 0;

err_nofile:
	__free_page(tmp_page);
err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return error;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.
		 * We should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		return 0;
	}

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

struct fuse_fill_wb_data {
	struct fuse_req *req;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
};

static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = req->num_pages;
	int i;

	req->ff = fuse_file_get(data->ff);
	spin_lock(&fc->lock);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}

static bool fuse_writepage_in_flight(struct fuse_req *new_req,
				     struct page *page)
{
	struct fuse_conn *fc = get_fuse_conn(new_req->inode);
	struct fuse_inode *fi = get_fuse_inode(new_req->inode);
	struct fuse_req *tmp;
	struct fuse_req *old_req;
	bool found = false;
	pgoff_t curr_index;

	BUG_ON(new_req->num_pages != 0);

	spin_lock(&fc->lock);
	list_del(&new_req->writepages_entry);
	list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
		BUG_ON(old_req->inode != new_req->inode);
		curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
		if (curr_index <= page->index &&
		    page->index < curr_index + old_req->num_pages) {
			found = true;
			break;
		}
	}
	if (!found) {
		list_add(&new_req->writepages_entry, &fi->writepages);
		goto out_unlock;
	}

	new_req->num_pages = 1;
	for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
		BUG_ON(tmp->inode != new_req->inode);
		curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
		if (tmp->num_pages == 1 &&
		    curr_index == page->index) {
			old_req = tmp;
		}
	}

	if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) {
		struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);

		copy_highpage(old_req->pages[0], page);
		spin_unlock(&fc->lock);

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_zone_page_state(page, NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(fc, new_req);
		fuse_request_free(new_req);
		goto out;
	} else {
		new_req->misc.write.next = old_req->misc.write.next;
		old_req->misc.write.next = new_req;
	}
out_unlock:
	spin_unlock(&fc->lock);
out:
	return found;
}

static int fuse_writepages_fill(struct page *page,
		struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	bool is_writeback;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
		if (!data->ff)
			goto out_unlock;
	}

	/*
	 * Being under writeback is unlikely but possible.
	 * For example direct read to an mmaped fuse file will set the page
	 * dirty twice; once when the pages are faulted with get_user_pages(),
	 * and then after the read completed.
	 */
	is_writeback = fuse_page_is_writeback(inode, page->index);

	if (req && req->num_pages &&
	    (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
	     data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_writepages_send(data);
		data->req = NULL;
	}
	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment req->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->req == NULL) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		err = -ENOMEM;
		req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
		if (!req) {
			__free_page(tmp_page);
			goto out_unlock;
		}

		fuse_write_fill(req, data->ff, page_offset(page), 0);
		req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
		req->misc.write.next = NULL;
		req->in.argpages = 1;
		__set_bit(FR_BACKGROUND, &req->flags);
		req->num_pages = 0;
		req->end = fuse_writepage_end;
		req->inode = inode;

		spin_lock(&fc->lock);
		list_add(&req->writepages_entry, &fi->writepages);
		spin_unlock(&fc->lock);

		data->req = req;
	}
	set_page_writeback(page);

	copy_highpage(tmp_page, page);
	req->pages[req->num_pages] = tmp_page;
	req->page_descs[req->num_pages].offset = 0;
	req->page_descs[req->num_pages].length = PAGE_SIZE;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (is_writeback && fuse_writepage_in_flight(req, page)) {
		end_page_writeback(page);
		data->req = NULL;
		goto out_unlock;
	}
	data->orig_pages[req->num_pages] = page;

	/*
	 * Protected by fc->lock against concurrent access by
	 * fuse_page_is_writeback().
	 */
	spin_lock(&fc->lock);
	req->num_pages++;
	spin_unlock(&fc->lock);

out_unlock:
	unlock_page(page);

	return err;
}

static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.inode = inode;
	data.req = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.req) {
		/* Ignore errors if we can write at least one page */
		BUG_ON(!data.req->num_pages);
		fuse_writepages_send(&data);
		err = 0;
	}
	if (data.ff)
		fuse_file_put(data.ff, false);

	kfree(data.orig_pages);
out:
	return err;
}

/*
 * It's worth making sure that space is reserved on disk for the write,
 * but how to implement it without killing performance needs more thinking.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file, in
	 * which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	put_page(page);
error:
	return err;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = (pos + copied) & ~PAGE_MASK;
		if (endoff)
			zero_user_segment(page, endoff, PAGE_SIZE);
		SetPageUptodate(page);
	}

	fuse_write_update_size(inode, pos + copied);
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	return copied;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->in.h.opcode = opcode;
	args->in.h.nodeid = get_node_id(inode);
	args->in.numargs = 1;
	args->in.args[0].size = sizeof(*inarg);
	args->in.args[0].value = inarg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
2142 struct fuse_conn *fc = get_fuse_conn(inode); 2143 FUSE_ARGS(args); 2144 struct fuse_lk_in inarg; 2145 int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; 2146 pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0; 2147 int err; 2148 2149 if (fl->fl_lmops && fl->fl_lmops->lm_grant) { 2150 /* NLM needs asynchronous locks, which we don't support yet */ 2151 return -ENOLCK; 2152 } 2153 2154 /* Unlock on close is handled by the flush method */ 2155 if (fl->fl_flags & FL_CLOSE) 2156 return 0; 2157 2158 fuse_lk_fill(&args, file, fl, opcode, pid, flock, &inarg); 2159 err = fuse_simple_request(fc, &args); 2160 2161 /* locking is restartable */ 2162 if (err == -EINTR) 2163 err = -ERESTARTSYS; 2164 2165 return err; 2166 } 2167 2168 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) 2169 { 2170 struct inode *inode = file_inode(file); 2171 struct fuse_conn *fc = get_fuse_conn(inode); 2172 int err; 2173 2174 if (cmd == F_CANCELLK) { 2175 err = 0; 2176 } else if (cmd == F_GETLK) { 2177 if (fc->no_lock) { 2178 posix_test_lock(file, fl); 2179 err = 0; 2180 } else 2181 err = fuse_getlk(file, fl); 2182 } else { 2183 if (fc->no_lock) 2184 err = posix_lock_file(file, fl, NULL); 2185 else 2186 err = fuse_setlk(file, fl, 0); 2187 } 2188 return err; 2189 } 2190 2191 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) 2192 { 2193 struct inode *inode = file_inode(file); 2194 struct fuse_conn *fc = get_fuse_conn(inode); 2195 int err; 2196 2197 if (fc->no_flock) { 2198 err = locks_lock_file_wait(file, fl); 2199 } else { 2200 struct fuse_file *ff = file->private_data; 2201 2202 /* emulate flock with POSIX locks */ 2203 ff->flock = true; 2204 err = fuse_setlk(file, fl, 1); 2205 } 2206 2207 return err; 2208 } 2209 2210 static sector_t fuse_bmap(struct address_space *mapping, sector_t block) 2211 { 2212 struct inode *inode = mapping->host; 2213 struct fuse_conn *fc = get_fuse_conn(inode); 2214 FUSE_ARGS(args); 2215 struct fuse_bmap_in inarg; 2216 struct fuse_bmap_out outarg; 2217 int err; 2218 2219 if (!inode->i_sb->s_bdev || fc->no_bmap) 2220 return 0; 2221 2222 memset(&inarg, 0, sizeof(inarg)); 2223 inarg.block = block; 2224 inarg.blocksize = inode->i_sb->s_blocksize; 2225 args.in.h.opcode = FUSE_BMAP; 2226 args.in.h.nodeid = get_node_id(inode); 2227 args.in.numargs = 1; 2228 args.in.args[0].size = sizeof(inarg); 2229 args.in.args[0].value = &inarg; 2230 args.out.numargs = 1; 2231 args.out.args[0].size = sizeof(outarg); 2232 args.out.args[0].value = &outarg; 2233 err = fuse_simple_request(fc, &args); 2234 if (err == -ENOSYS) 2235 fc->no_bmap = 1; 2236 2237 return err ? 
0 : outarg.block; 2238 } 2239 2240 static loff_t fuse_lseek(struct file *file, loff_t offset, int whence) 2241 { 2242 struct inode *inode = file->f_mapping->host; 2243 struct fuse_conn *fc = get_fuse_conn(inode); 2244 struct fuse_file *ff = file->private_data; 2245 FUSE_ARGS(args); 2246 struct fuse_lseek_in inarg = { 2247 .fh = ff->fh, 2248 .offset = offset, 2249 .whence = whence 2250 }; 2251 struct fuse_lseek_out outarg; 2252 int err; 2253 2254 if (fc->no_lseek) 2255 goto fallback; 2256 2257 args.in.h.opcode = FUSE_LSEEK; 2258 args.in.h.nodeid = ff->nodeid; 2259 args.in.numargs = 1; 2260 args.in.args[0].size = sizeof(inarg); 2261 args.in.args[0].value = &inarg; 2262 args.out.numargs = 1; 2263 args.out.args[0].size = sizeof(outarg); 2264 args.out.args[0].value = &outarg; 2265 err = fuse_simple_request(fc, &args); 2266 if (err) { 2267 if (err == -ENOSYS) { 2268 fc->no_lseek = 1; 2269 goto fallback; 2270 } 2271 return err; 2272 } 2273 2274 return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes); 2275 2276 fallback: 2277 err = fuse_update_attributes(inode, NULL, file, NULL); 2278 if (!err) 2279 return generic_file_llseek(file, offset, whence); 2280 else 2281 return err; 2282 } 2283 2284 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence) 2285 { 2286 loff_t retval; 2287 struct inode *inode = file_inode(file); 2288 2289 switch (whence) { 2290 case SEEK_SET: 2291 case SEEK_CUR: 2292 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ 2293 retval = generic_file_llseek(file, offset, whence); 2294 break; 2295 case SEEK_END: 2296 inode_lock(inode); 2297 retval = fuse_update_attributes(inode, NULL, file, NULL); 2298 if (!retval) 2299 retval = generic_file_llseek(file, offset, whence); 2300 inode_unlock(inode); 2301 break; 2302 case SEEK_HOLE: 2303 case SEEK_DATA: 2304 inode_lock(inode); 2305 retval = fuse_lseek(file, offset, whence); 2306 inode_unlock(inode); 2307 break; 2308 default: 2309 retval = -EINVAL; 2310 } 2311 2312 return retval; 2313 } 2314 2315 static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, 2316 unsigned int nr_segs, size_t bytes, bool to_user) 2317 { 2318 struct iov_iter ii; 2319 int page_idx = 0; 2320 2321 if (!bytes) 2322 return 0; 2323 2324 iov_iter_init(&ii, to_user ? READ : WRITE, iov, nr_segs, bytes); 2325 2326 while (iov_iter_count(&ii)) { 2327 struct page *page = pages[page_idx++]; 2328 size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); 2329 void *kaddr; 2330 2331 kaddr = kmap(page); 2332 2333 while (todo) { 2334 char __user *uaddr = ii.iov->iov_base + ii.iov_offset; 2335 size_t iov_len = ii.iov->iov_len - ii.iov_offset; 2336 size_t copy = min(todo, iov_len); 2337 size_t left; 2338 2339 if (!to_user) 2340 left = copy_from_user(kaddr, uaddr, copy); 2341 else 2342 left = copy_to_user(uaddr, kaddr, copy); 2343 2344 if (unlikely(left)) 2345 return -EFAULT; 2346 2347 iov_iter_advance(&ii, copy); 2348 todo -= copy; 2349 kaddr += copy; 2350 } 2351 2352 kunmap(page); 2353 } 2354 2355 return 0; 2356 } 2357 2358 /* 2359 * CUSE servers compiled on 32bit broke on 64bit kernels because the 2360 * ABI was defined to be 'struct iovec' which is different on 32bit 2361 * and 64bit. Fortunately we can determine which structure the server 2362 * used from the size of the reply. 
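 *
 * For example, with count == 2 a legacy 32bit server's reply is
 * 2 * sizeof(struct compat_iovec) == 16 bytes, while a native 64bit
 * reply is 2 * sizeof(struct iovec) == 32 bytes (sizes as seen by a
 * 64bit kernel), so fuse_copy_ioctl_iovec_old() below can tell the
 * two layouts apart by comparing 'transferred' against each product.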
2363 */ 2364 static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src, 2365 size_t transferred, unsigned count, 2366 bool is_compat) 2367 { 2368 #ifdef CONFIG_COMPAT 2369 if (count * sizeof(struct compat_iovec) == transferred) { 2370 struct compat_iovec *ciov = src; 2371 unsigned i; 2372 2373 /* 2374 * With this interface a 32bit server cannot support 2375 * non-compat (i.e. ones coming from 64bit apps) ioctl 2376 * requests 2377 */ 2378 if (!is_compat) 2379 return -EINVAL; 2380 2381 for (i = 0; i < count; i++) { 2382 dst[i].iov_base = compat_ptr(ciov[i].iov_base); 2383 dst[i].iov_len = ciov[i].iov_len; 2384 } 2385 return 0; 2386 } 2387 #endif 2388 2389 if (count * sizeof(struct iovec) != transferred) 2390 return -EIO; 2391 2392 memcpy(dst, src, transferred); 2393 return 0; 2394 } 2395 2396 /* Make sure iov_length() won't overflow */ 2397 static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) 2398 { 2399 size_t n; 2400 u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; 2401 2402 for (n = 0; n < count; n++, iov++) { 2403 if (iov->iov_len > (size_t) max) 2404 return -ENOMEM; 2405 max -= iov->iov_len; 2406 } 2407 return 0; 2408 } 2409 2410 static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst, 2411 void *src, size_t transferred, unsigned count, 2412 bool is_compat) 2413 { 2414 unsigned i; 2415 struct fuse_ioctl_iovec *fiov = src; 2416 2417 if (fc->minor < 16) { 2418 return fuse_copy_ioctl_iovec_old(dst, src, transferred, 2419 count, is_compat); 2420 } 2421 2422 if (count * sizeof(struct fuse_ioctl_iovec) != transferred) 2423 return -EIO; 2424 2425 for (i = 0; i < count; i++) { 2426 /* Did the server supply an inappropriate value? */ 2427 if (fiov[i].base != (unsigned long) fiov[i].base || 2428 fiov[i].len != (unsigned long) fiov[i].len) 2429 return -EIO; 2430 2431 dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base; 2432 dst[i].iov_len = (size_t) fiov[i].len; 2433 2434 #ifdef CONFIG_COMPAT 2435 if (is_compat && 2436 (ptr_to_compat(dst[i].iov_base) != fiov[i].base || 2437 (compat_size_t) dst[i].iov_len != fiov[i].len)) 2438 return -EIO; 2439 #endif 2440 } 2441 2442 return 0; 2443 } 2444 2445 2446 /* 2447 * For ioctls, there is no generic way to determine how much memory 2448 * needs to be read and/or written. Furthermore, ioctls are allowed 2449 * to dereference the passed pointer, so the parameter requires deep 2450 * copying but FUSE has no idea whatsoever about what to copy in or 2451 * out. 2452 * 2453 * This is solved by allowing FUSE server to retry ioctl with 2454 * necessary in/out iovecs. Let's assume the ioctl implementation 2455 * needs to read in the following structure. 2456 * 2457 * struct a { 2458 * char *buf; 2459 * size_t buflen; 2460 * } 2461 * 2462 * On the first callout to FUSE server, inarg->in_size and 2463 * inarg->out_size will be NULL; then, the server completes the ioctl 2464 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and 2465 * the actual iov array to 2466 * 2467 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } } 2468 * 2469 * which tells FUSE to copy in the requested area and retry the ioctl. 
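 *
 * A hedged sketch of that first reply from the server's side (field
 * names from struct fuse_ioctl_out; a libfuse server would normally
 * go through a helper such as fuse_reply_ioctl_retry()):
 *
 *   out->result   = 0;
 *   out->flags    = FUSE_IOCTL_RETRY;
 *   out->in_iovs  = 1;
 *   out->out_iovs = 0;
 *
 * followed by the iov array above as the reply payload.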
2470 * On the second round, the server has access to the structure and 2471 * from that it can tell what to look for next, so on the invocation, 2472 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to 2473 * 2474 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) }, 2475 * { .iov_base = a.buf, .iov_len = a.buflen } } 2476 * 2477 * FUSE will copy both struct a and the pointed buffer from the 2478 * process doing the ioctl and retry ioctl with both struct a and the 2479 * buffer. 2480 * 2481 * This time, FUSE server has everything it needs and completes ioctl 2482 * without FUSE_IOCTL_RETRY which finishes the ioctl call. 2483 * 2484 * Copying data out works the same way. 2485 * 2486 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel 2487 * automatically initializes in and out iovs by decoding @cmd with 2488 * _IOC_* macros and the server is not allowed to request RETRY. This 2489 * limits ioctl data transfers to well-formed ioctls and is the forced 2490 * behavior for all FUSE servers. 2491 */ 2492 long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, 2493 unsigned int flags) 2494 { 2495 struct fuse_file *ff = file->private_data; 2496 struct fuse_conn *fc = ff->fc; 2497 struct fuse_ioctl_in inarg = { 2498 .fh = ff->fh, 2499 .cmd = cmd, 2500 .arg = arg, 2501 .flags = flags 2502 }; 2503 struct fuse_ioctl_out outarg; 2504 struct fuse_req *req = NULL; 2505 struct page **pages = NULL; 2506 struct iovec *iov_page = NULL; 2507 struct iovec *in_iov = NULL, *out_iov = NULL; 2508 unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; 2509 size_t in_size, out_size, transferred; 2510 int err; 2511 2512 #if BITS_PER_LONG == 32 2513 inarg.flags |= FUSE_IOCTL_32BIT; 2514 #else 2515 if (flags & FUSE_IOCTL_COMPAT) 2516 inarg.flags |= FUSE_IOCTL_32BIT; 2517 #endif 2518 2519 /* assume all the iovs returned by client always fits in a page */ 2520 BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); 2521 2522 err = -ENOMEM; 2523 pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL); 2524 iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); 2525 if (!pages || !iov_page) 2526 goto out; 2527 2528 /* 2529 * If restricted, initialize IO parameters as encoded in @cmd. 2530 * RETRY from server is not allowed. 2531 */ 2532 if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { 2533 struct iovec *iov = iov_page; 2534 2535 iov->iov_base = (void __user *)arg; 2536 iov->iov_len = _IOC_SIZE(cmd); 2537 2538 if (_IOC_DIR(cmd) & _IOC_WRITE) { 2539 in_iov = iov; 2540 in_iovs = 1; 2541 } 2542 2543 if (_IOC_DIR(cmd) & _IOC_READ) { 2544 out_iov = iov; 2545 out_iovs = 1; 2546 } 2547 } 2548 2549 retry: 2550 inarg.in_size = in_size = iov_length(in_iov, in_iovs); 2551 inarg.out_size = out_size = iov_length(out_iov, out_iovs); 2552 2553 /* 2554 * Out data can be used either for actual out data or iovs, 2555 * make sure there always is at least one page. 
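 *
 * (A RETRY reply also comes back through these out pages even when
 * inarg.out_size is 0: at most FUSE_IOCTL_MAX_IOV entries of
 * struct fuse_ioctl_iovec, e.g. 256 * 16 == 4096 bytes, which the
 * BUILD_BUG_ON above keeps within PAGE_SIZE.)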
2556 */ 2557 out_size = max_t(size_t, out_size, PAGE_SIZE); 2558 max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); 2559 2560 /* make sure there are enough buffer pages and init request with them */ 2561 err = -ENOMEM; 2562 if (max_pages > FUSE_MAX_PAGES_PER_REQ) 2563 goto out; 2564 while (num_pages < max_pages) { 2565 pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); 2566 if (!pages[num_pages]) 2567 goto out; 2568 num_pages++; 2569 } 2570 2571 req = fuse_get_req(fc, num_pages); 2572 if (IS_ERR(req)) { 2573 err = PTR_ERR(req); 2574 req = NULL; 2575 goto out; 2576 } 2577 memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); 2578 req->num_pages = num_pages; 2579 fuse_page_descs_length_init(req, 0, req->num_pages); 2580 2581 /* okay, let's send it to the client */ 2582 req->in.h.opcode = FUSE_IOCTL; 2583 req->in.h.nodeid = ff->nodeid; 2584 req->in.numargs = 1; 2585 req->in.args[0].size = sizeof(inarg); 2586 req->in.args[0].value = &inarg; 2587 if (in_size) { 2588 req->in.numargs++; 2589 req->in.args[1].size = in_size; 2590 req->in.argpages = 1; 2591 2592 err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size, 2593 false); 2594 if (err) 2595 goto out; 2596 } 2597 2598 req->out.numargs = 2; 2599 req->out.args[0].size = sizeof(outarg); 2600 req->out.args[0].value = &outarg; 2601 req->out.args[1].size = out_size; 2602 req->out.argpages = 1; 2603 req->out.argvar = 1; 2604 2605 fuse_request_send(fc, req); 2606 err = req->out.h.error; 2607 transferred = req->out.args[1].size; 2608 fuse_put_request(fc, req); 2609 req = NULL; 2610 if (err) 2611 goto out; 2612 2613 /* did it ask for retry? */ 2614 if (outarg.flags & FUSE_IOCTL_RETRY) { 2615 void *vaddr; 2616 2617 /* no retry if in restricted mode */ 2618 err = -EIO; 2619 if (!(flags & FUSE_IOCTL_UNRESTRICTED)) 2620 goto out; 2621 2622 in_iovs = outarg.in_iovs; 2623 out_iovs = outarg.out_iovs; 2624 2625 /* 2626 * Make sure things are in boundary, separate checks 2627 * are to protect against overflow. 2628 */ 2629 err = -ENOMEM; 2630 if (in_iovs > FUSE_IOCTL_MAX_IOV || 2631 out_iovs > FUSE_IOCTL_MAX_IOV || 2632 in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) 2633 goto out; 2634 2635 vaddr = kmap_atomic(pages[0]); 2636 err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr, 2637 transferred, in_iovs + out_iovs, 2638 (flags & FUSE_IOCTL_COMPAT) != 0); 2639 kunmap_atomic(vaddr); 2640 if (err) 2641 goto out; 2642 2643 in_iov = iov_page; 2644 out_iov = in_iov + in_iovs; 2645 2646 err = fuse_verify_ioctl_iov(in_iov, in_iovs); 2647 if (err) 2648 goto out; 2649 2650 err = fuse_verify_ioctl_iov(out_iov, out_iovs); 2651 if (err) 2652 goto out; 2653 2654 goto retry; 2655 } 2656 2657 err = -EIO; 2658 if (transferred > inarg.out_size) 2659 goto out; 2660 2661 err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true); 2662 out: 2663 if (req) 2664 fuse_put_request(fc, req); 2665 free_page((unsigned long) iov_page); 2666 while (num_pages) 2667 __free_page(pages[--num_pages]); 2668 kfree(pages); 2669 2670 return err ? 
err : outarg.result; 2671 } 2672 EXPORT_SYMBOL_GPL(fuse_do_ioctl); 2673 2674 long fuse_ioctl_common(struct file *file, unsigned int cmd, 2675 unsigned long arg, unsigned int flags) 2676 { 2677 struct inode *inode = file_inode(file); 2678 struct fuse_conn *fc = get_fuse_conn(inode); 2679 2680 if (!fuse_allow_current_process(fc)) 2681 return -EACCES; 2682 2683 if (is_bad_inode(inode)) 2684 return -EIO; 2685 2686 return fuse_do_ioctl(file, cmd, arg, flags); 2687 } 2688 2689 static long fuse_file_ioctl(struct file *file, unsigned int cmd, 2690 unsigned long arg) 2691 { 2692 return fuse_ioctl_common(file, cmd, arg, 0); 2693 } 2694 2695 static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, 2696 unsigned long arg) 2697 { 2698 return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); 2699 } 2700 2701 /* 2702 * All files which have been polled are linked to RB tree 2703 * fuse_conn->polled_files which is indexed by kh. Walk the tree and 2704 * find the matching one. 2705 */ 2706 static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, 2707 struct rb_node **parent_out) 2708 { 2709 struct rb_node **link = &fc->polled_files.rb_node; 2710 struct rb_node *last = NULL; 2711 2712 while (*link) { 2713 struct fuse_file *ff; 2714 2715 last = *link; 2716 ff = rb_entry(last, struct fuse_file, polled_node); 2717 2718 if (kh < ff->kh) 2719 link = &last->rb_left; 2720 else if (kh > ff->kh) 2721 link = &last->rb_right; 2722 else 2723 return link; 2724 } 2725 2726 if (parent_out) 2727 *parent_out = last; 2728 return link; 2729 } 2730 2731 /* 2732 * The file is about to be polled. Make sure it's on the polled_files 2733 * RB tree. Note that files once added to the polled_files tree are 2734 * not removed before the file is released. This is because a file 2735 * polled once is likely to be polled again. 2736 */ 2737 static void fuse_register_polled_file(struct fuse_conn *fc, 2738 struct fuse_file *ff) 2739 { 2740 spin_lock(&fc->lock); 2741 if (RB_EMPTY_NODE(&ff->polled_node)) { 2742 struct rb_node **link, *uninitialized_var(parent); 2743 2744 link = fuse_find_polled_node(fc, ff->kh, &parent); 2745 BUG_ON(*link); 2746 rb_link_node(&ff->polled_node, parent, link); 2747 rb_insert_color(&ff->polled_node, &fc->polled_files); 2748 } 2749 spin_unlock(&fc->lock); 2750 } 2751 2752 unsigned fuse_file_poll(struct file *file, poll_table *wait) 2753 { 2754 struct fuse_file *ff = file->private_data; 2755 struct fuse_conn *fc = ff->fc; 2756 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; 2757 struct fuse_poll_out outarg; 2758 FUSE_ARGS(args); 2759 int err; 2760 2761 if (fc->no_poll) 2762 return DEFAULT_POLLMASK; 2763 2764 poll_wait(file, &ff->poll_wait, wait); 2765 inarg.events = (__u32)poll_requested_events(wait); 2766 2767 /* 2768 * Ask for notification iff there's someone waiting for it. 2769 * The client may ignore the flag and always notify. 
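 *
 * A hedged sketch of the server side (libfuse lowlevel names, which
 * may differ between versions): on FUSE_POLL with
 * FUSE_POLL_SCHEDULE_NOTIFY set, the server keeps the poll handle
 * for this kh and, once the file becomes ready, calls something like
 *
 *   fuse_lowlevel_notify_poll(saved_ph);
 *
 * which results in a FUSE_NOTIFY_POLL message carrying the kh and
 * ends up in fuse_notify_poll_wakeup() below.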
2770 */ 2771 if (waitqueue_active(&ff->poll_wait)) { 2772 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; 2773 fuse_register_polled_file(fc, ff); 2774 } 2775 2776 args.in.h.opcode = FUSE_POLL; 2777 args.in.h.nodeid = ff->nodeid; 2778 args.in.numargs = 1; 2779 args.in.args[0].size = sizeof(inarg); 2780 args.in.args[0].value = &inarg; 2781 args.out.numargs = 1; 2782 args.out.args[0].size = sizeof(outarg); 2783 args.out.args[0].value = &outarg; 2784 err = fuse_simple_request(fc, &args); 2785 2786 if (!err) 2787 return outarg.revents; 2788 if (err == -ENOSYS) { 2789 fc->no_poll = 1; 2790 return DEFAULT_POLLMASK; 2791 } 2792 return POLLERR; 2793 } 2794 EXPORT_SYMBOL_GPL(fuse_file_poll); 2795 2796 /* 2797 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and 2798 * wakes up the poll waiters. 2799 */ 2800 int fuse_notify_poll_wakeup(struct fuse_conn *fc, 2801 struct fuse_notify_poll_wakeup_out *outarg) 2802 { 2803 u64 kh = outarg->kh; 2804 struct rb_node **link; 2805 2806 spin_lock(&fc->lock); 2807 2808 link = fuse_find_polled_node(fc, kh, NULL); 2809 if (*link) { 2810 struct fuse_file *ff; 2811 2812 ff = rb_entry(*link, struct fuse_file, polled_node); 2813 wake_up_interruptible_sync(&ff->poll_wait); 2814 } 2815 2816 spin_unlock(&fc->lock); 2817 return 0; 2818 } 2819 2820 static void fuse_do_truncate(struct file *file) 2821 { 2822 struct inode *inode = file->f_mapping->host; 2823 struct iattr attr; 2824 2825 attr.ia_valid = ATTR_SIZE; 2826 attr.ia_size = i_size_read(inode); 2827 2828 attr.ia_file = file; 2829 attr.ia_valid |= ATTR_FILE; 2830 2831 fuse_do_setattr(inode, &attr, file); 2832 } 2833 2834 static inline loff_t fuse_round_up(loff_t off) 2835 { 2836 return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); 2837 } 2838 2839 static ssize_t 2840 fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 2841 { 2842 DECLARE_COMPLETION_ONSTACK(wait); 2843 ssize_t ret = 0; 2844 struct file *file = iocb->ki_filp; 2845 struct fuse_file *ff = file->private_data; 2846 bool async_dio = ff->fc->async_dio; 2847 loff_t pos = 0; 2848 struct inode *inode; 2849 loff_t i_size; 2850 size_t count = iov_iter_count(iter); 2851 loff_t offset = iocb->ki_pos; 2852 struct fuse_io_priv *io; 2853 bool is_sync = is_sync_kiocb(iocb); 2854 2855 pos = offset; 2856 inode = file->f_mapping->host; 2857 i_size = i_size_read(inode); 2858 2859 if ((iov_iter_rw(iter) == READ) && (offset > i_size)) 2860 return 0; 2861 2862 /* optimization for short read */ 2863 if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { 2864 if (offset >= i_size) 2865 return 0; 2866 iov_iter_truncate(iter, fuse_round_up(i_size - offset)); 2867 count = iov_iter_count(iter); 2868 } 2869 2870 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); 2871 if (!io) 2872 return -ENOMEM; 2873 spin_lock_init(&io->lock); 2874 kref_init(&io->refcnt); 2875 io->reqs = 1; 2876 io->bytes = -1; 2877 io->size = 0; 2878 io->offset = offset; 2879 io->write = (iov_iter_rw(iter) == WRITE); 2880 io->err = 0; 2881 io->file = file; 2882 /* 2883 * By default, we want to optimize all I/Os with async request 2884 * submission to the client filesystem if supported. 2885 */ 2886 io->async = async_dio; 2887 io->iocb = iocb; 2888 2889 /* 2890 * We cannot asynchronously extend the size of a file. We have no method 2891 * to wait on real async I/O requests, so we must submit this request 2892 * synchronously. 
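 *
 * For example, with i_size == 100, a direct write of 20 bytes at
 * offset 90 extends the file (offset + count > i_size), so io->async
 * is forced off just below even when the connection supports
 * async_dio.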
2893 */ 2894 if (!is_sync && (offset + count > i_size) && 2895 iov_iter_rw(iter) == WRITE) 2896 io->async = false; 2897 2898 if (io->async && is_sync) { 2899 /* 2900 * Additional reference to keep io around after 2901 * calling fuse_aio_complete() 2902 */ 2903 kref_get(&io->refcnt); 2904 io->done = &wait; 2905 } 2906 2907 if (iov_iter_rw(iter) == WRITE) { 2908 ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE); 2909 fuse_invalidate_attr(inode); 2910 } else { 2911 ret = __fuse_direct_read(io, iter, &pos); 2912 } 2913 2914 if (io->async) { 2915 fuse_aio_complete(io, ret < 0 ? ret : 0, -1); 2916 2917 /* we have a non-extending, async request, so return */ 2918 if (!is_sync) 2919 return -EIOCBQUEUED; 2920 2921 wait_for_completion(&wait); 2922 ret = fuse_get_res_by_io(io); 2923 } 2924 2925 kref_put(&io->refcnt, fuse_io_release); 2926 2927 if (iov_iter_rw(iter) == WRITE) { 2928 if (ret > 0) 2929 fuse_write_update_size(inode, pos); 2930 else if (ret < 0 && offset + count > i_size) 2931 fuse_do_truncate(file); 2932 } 2933 2934 return ret; 2935 } 2936 2937 static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, 2938 loff_t length) 2939 { 2940 struct fuse_file *ff = file->private_data; 2941 struct inode *inode = file_inode(file); 2942 struct fuse_inode *fi = get_fuse_inode(inode); 2943 struct fuse_conn *fc = ff->fc; 2944 FUSE_ARGS(args); 2945 struct fuse_fallocate_in inarg = { 2946 .fh = ff->fh, 2947 .offset = offset, 2948 .length = length, 2949 .mode = mode 2950 }; 2951 int err; 2952 bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || 2953 (mode & FALLOC_FL_PUNCH_HOLE); 2954 2955 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 2956 return -EOPNOTSUPP; 2957 2958 if (fc->no_fallocate) 2959 return -EOPNOTSUPP; 2960 2961 if (lock_inode) { 2962 inode_lock(inode); 2963 if (mode & FALLOC_FL_PUNCH_HOLE) { 2964 loff_t endbyte = offset + length - 1; 2965 err = filemap_write_and_wait_range(inode->i_mapping, 2966 offset, endbyte); 2967 if (err) 2968 goto out; 2969 2970 fuse_sync_writes(inode); 2971 } 2972 } 2973 2974 if (!(mode & FALLOC_FL_KEEP_SIZE)) 2975 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 2976 2977 args.in.h.opcode = FUSE_FALLOCATE; 2978 args.in.h.nodeid = ff->nodeid; 2979 args.in.numargs = 1; 2980 args.in.args[0].size = sizeof(inarg); 2981 args.in.args[0].value = &inarg; 2982 err = fuse_simple_request(fc, &args); 2983 if (err == -ENOSYS) { 2984 fc->no_fallocate = 1; 2985 err = -EOPNOTSUPP; 2986 } 2987 if (err) 2988 goto out; 2989 2990 /* we could have extended the file */ 2991 if (!(mode & FALLOC_FL_KEEP_SIZE)) { 2992 bool changed = fuse_write_update_size(inode, offset + length); 2993 2994 if (changed && fc->writeback_cache) 2995 file_update_time(file); 2996 } 2997 2998 if (mode & FALLOC_FL_PUNCH_HOLE) 2999 truncate_pagecache_range(inode, offset, offset + length - 1); 3000 3001 fuse_invalidate_attr(inode); 3002 3003 out: 3004 if (!(mode & FALLOC_FL_KEEP_SIZE)) 3005 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 3006 3007 if (lock_inode) 3008 inode_unlock(inode); 3009 3010 return err; 3011 } 3012 3013 static const struct file_operations fuse_file_operations = { 3014 .llseek = fuse_file_llseek, 3015 .read_iter = fuse_file_read_iter, 3016 .write_iter = fuse_file_write_iter, 3017 .mmap = fuse_file_mmap, 3018 .open = fuse_open, 3019 .flush = fuse_flush, 3020 .release = fuse_release, 3021 .fsync = fuse_fsync, 3022 .lock = fuse_file_lock, 3023 .flock = fuse_file_flock, 3024 .splice_read = generic_file_splice_read, 3025 .unlocked_ioctl = fuse_file_ioctl, 3026 .compat_ioctl = 
fuse_file_compat_ioctl, 3027 .poll = fuse_file_poll, 3028 .fallocate = fuse_file_fallocate, 3029 }; 3030 3031 static const struct file_operations fuse_direct_io_file_operations = { 3032 .llseek = fuse_file_llseek, 3033 .read_iter = fuse_direct_read_iter, 3034 .write_iter = fuse_direct_write_iter, 3035 .mmap = fuse_direct_mmap, 3036 .open = fuse_open, 3037 .flush = fuse_flush, 3038 .release = fuse_release, 3039 .fsync = fuse_fsync, 3040 .lock = fuse_file_lock, 3041 .flock = fuse_file_flock, 3042 .unlocked_ioctl = fuse_file_ioctl, 3043 .compat_ioctl = fuse_file_compat_ioctl, 3044 .poll = fuse_file_poll, 3045 .fallocate = fuse_file_fallocate, 3046 /* no splice_read */ 3047 }; 3048 3049 static const struct address_space_operations fuse_file_aops = { 3050 .readpage = fuse_readpage, 3051 .writepage = fuse_writepage, 3052 .writepages = fuse_writepages, 3053 .launder_page = fuse_launder_page, 3054 .readpages = fuse_readpages, 3055 .set_page_dirty = __set_page_dirty_nobuffers, 3056 .bmap = fuse_bmap, 3057 .direct_IO = fuse_direct_IO, 3058 .write_begin = fuse_write_begin, 3059 .write_end = fuse_write_end, 3060 }; 3061 3062 void fuse_init_file_inode(struct inode *inode) 3063 { 3064 inode->i_fop = &fuse_file_operations; 3065 inode->i_data.a_ops = &fuse_file_aops; 3066 } 3067
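/*
 * Usage note (a hedged sketch, not part of this kernel code): a
 * userspace server selects fuse_direct_io_file_operations above by
 * setting FOPEN_DIRECT_IO in its FUSE_OPEN reply.  With the libfuse
 * high-level API that typically amounts to an open handler such as
 *
 *	static int my_open(const char *path, struct fuse_file_info *fi)
 *	{
 *		fi->direct_io = 1;
 *		return 0;
 *	}
 *
 * where libfuse translates fi->direct_io into FOPEN_DIRECT_IO.  The
 * handler name is made up for illustration; field and flag names are
 * from libfuse and may differ between versions.
 */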