/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

/* Ordinary requests have even IDs, while interrupt IDs are odd */
#define FUSE_INT_REQ_BIT (1ULL << 0)
#define FUSE_REQ_ID_STEP (1ULL << 1)

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return READ_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	__set_bit(FR_PENDING, &req->flags);
	req->fm = fm;
}

static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
	if (req)
		fuse_request_init(fm, req);

	return req;
}

static void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static void fuse_drop_waiting(struct fuse_conn *fc)
{
	/*
	 * Lockless check of fc->connected is okay, because
	 * atomic_dec_and_test() provides a memory barrier matched with
	 * the one in fuse_wait_aborted() to ensure no wake-up is missed.
	 */
	if (atomic_dec_and_test(&fc->num_waiting) &&
	    !READ_ONCE(fc->connected)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}
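
/*
 * Overview of the allocation path below, for readers of this file: a
 * request may only be allocated once the connection is initialized, and
 * background allocations additionally block while fc->blocked is set.
 * The caller's credentials are translated into the connection's user
 * namespace; if the uid or gid cannot be represented there, allocation
 * fails with -EOVERFLOW rather than sending an unmappable identity to
 * the server.
 */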

static void fuse_put_request(struct fuse_req *req);

static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(fm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
		     req->in.h.gid == ((gid_t)-1))) {
		fuse_put_request(req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

 out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}

static void fuse_put_request(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->bg_lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->bg_lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		fuse_request_free(req);
	}
}

unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
EXPORT_SYMBOL_GPL(fuse_len_args);

u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	fiq->reqctr += FUSE_REQ_ID_STEP;
	return fiq->reqctr;
}
EXPORT_SYMBOL_GPL(fuse_get_unique);

static unsigned int fuse_req_hash(u64 unique)
{
	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}

/**
 * A new request is available, wake fiq->waitq
 */
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	wake_up(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	spin_unlock(&fiq->lock);
}

const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);

static void queue_request_and_unlock(struct fuse_iqueue *fiq,
				     struct fuse_req *req)
__releases(fiq->lock)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		fuse_len_args(req->args->in_numargs,
			      (struct fuse_arg *) req->args->in_args);
	list_add_tail(&req->list, &fiq->pending);
	fiq->ops->wake_pending_and_unlock(fiq);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		fiq->ops->wake_forget_and_unlock(fiq);
	} else {
		kfree(forget);
		spin_unlock(&fiq->lock);
	}
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request_and_unlock(fiq, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
void fuse_request_end(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	/*
	 * test_and_set_bit() implies smp_mb() between bit
	 * changing and below FR_INTERRUPTED check. Pairs with
	 * smp_mb() from queue_interrupt().
	 */
	if (test_bit(FR_INTERRUPTED, &req->flags)) {
		spin_lock(&fiq->lock);
		list_del_init(&req->intr_entry);
		spin_unlock(&fiq->lock);
	}
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->bg_lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any.  It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		if (fc->num_background == fc->congestion_threshold && fm->sb) {
			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);
	} else {
		/* Wake up waiter sleeping in request_wait_answer() */
		wake_up(&req->waitq);
	}

	if (test_bit(FR_ASYNC, &req->flags))
		req->args->end(fm, req->args, req->out.h.error);
put_request:
	fuse_put_request(req);
}
EXPORT_SYMBOL_GPL(fuse_request_end);

static int queue_interrupt(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	spin_lock(&fiq->lock);
	/* Check that the request has actually been interrupted */
	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		/*
		 * Pairs with smp_mb() implied by test_and_set_bit()
		 * from fuse_request_end().
		 */
		smp_mb();
		if (test_bit(FR_FINISHED, &req->flags)) {
			list_del_init(&req->intr_entry);
			spin_unlock(&fiq->lock);
			return 0;
		}
		fiq->ops->wake_interrupt_and_unlock(fiq);
	} else {
		spin_unlock(&fiq->lock);
	}
	return 0;
}

static void request_wait_answer(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}
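
/*
 * Note for readers: request_wait_answer() degrades through three wait
 * modes.  While the server supports interrupts, any signal stops the
 * wait and queues a FUSE_INTERRUPT; after that, only fatal signals are
 * honoured, and a still-pending request can be unwound with -EINTR;
 * once the request has reached userspace (or was forced), the wait
 * becomes uninterruptible until the reply or an abort finishes it.
 */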

static void __fuse_request_send(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		/* acquire extra reference, since request is still needed
		   after fuse_request_end() */
		__fuse_get_request(req);
		queue_request_and_unlock(fiq, req);

		request_wait_answer(req);
		/* Pairs with smp_wmb() in fuse_request_end() */
		smp_rmb();
	}
}

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->opcode) {
		case FUSE_CREATE:
			args->in_args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

static void fuse_force_creds(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
{
	req->in.h.opcode = args->opcode;
	req->in.h.nodeid = args->nodeid;
	req->args = args;
	if (args->end)
		__set_bit(FR_ASYNC, &req->flags);
}

ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	ssize_t ret;

	if (args->force) {
		atomic_inc(&fc->num_waiting);
		req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);

		if (!args->nocreds)
			fuse_force_creds(req);

		__set_bit(FR_WAITING, &req->flags);
		__set_bit(FR_FORCE, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, false);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);
	fuse_args_to_req(req, args);

	if (!args->noreply)
		__set_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(req);
	ret = req->out.h.error;
	if (!ret && args->out_argvar) {
		BUG_ON(args->out_numargs == 0);
		ret = args->out_args[args->out_numargs - 1].size;
	}
	fuse_put_request(req);

	return ret;
}
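
/*
 * Illustrative caller pattern (a sketch, not code from this file): a
 * typical synchronous operation fills in a fuse_args on the stack and
 * lets fuse_simple_request() do the rest, e.g.
 *
 *	FUSE_ARGS(args);
 *	args.opcode = FUSE_GETATTR;
 *	args.nodeid = get_node_id(inode);
 *	args.in_numargs = 1;
 *	args.in_args[0].size = sizeof(inarg);
 *	args.in_args[0].value = &inarg;
 *	args.out_numargs = 1;
 *	args.out_args[0].size = sizeof(outarg);
 *	args.out_args[0].value = &outarg;
 *	err = fuse_simple_request(fm, &args);
 *
 * A negative return is the error from req->out.h.error; with
 * out_argvar set, a non-negative return is the size of the variable
 * length last output argument.
 */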

static bool fuse_request_queue_background(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	bool queued = false;

	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	spin_lock(&fc->bg_lock);
	if (likely(fc->connected)) {
		fc->num_background++;
		if (fc->num_background == fc->max_background)
			fc->blocked = 1;
		if (fc->num_background == fc->congestion_threshold && fm->sb) {
			set_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
			set_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
		}
		list_add_tail(&req->list, &fc->bg_queue);
		flush_bg_queue(fc);
		queued = true;
	}
	spin_unlock(&fc->bg_lock);

	return queued;
}

int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
			   gfp_t gfp_flags)
{
	struct fuse_req *req;

	if (args->force) {
		WARN_ON(!args->nocreds);
		req = fuse_request_alloc(fm, gfp_flags);
		if (!req)
			return -ENOMEM;
		__set_bit(FR_BACKGROUND, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, true);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	fuse_args_to_req(req, args);

	if (!fuse_request_queue_background(req)) {
		fuse_put_request(req);
		return -ENOTCONN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_simple_background);

static int fuse_simple_notify_reply(struct fuse_mount *fm,
				    struct fuse_args *args, u64 unique)
{
	struct fuse_req *req;
	struct fuse_iqueue *fiq = &fm->fc->iq;
	int err = 0;

	req = fuse_get_req(fm, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;

	fuse_args_to_req(req, args);

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		queue_request_and_unlock(fiq, req);
	} else {
		err = -ENODEV;
		spin_unlock(&fiq->lock);
		fuse_put_request(req);
	}

	return err;
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}
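
/*
 * The copy state below abstracts over the ways data moves between the
 * kernel and the server: a plain read/write supplies an iov_iter,
 * splice supplies pipe buffers, and in both cases at most one page
 * (cs->pg, cs->offset, cs->len) is "current" at any time.  cs->write is
 * from the kernel's point of view: non-zero when request data is being
 * written out to the server's buffer.
 */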

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pageful of the userspace buffer, map it into kernel
 * address space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs >= cs->pipe->max_usage)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_local_page(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_local(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_workingset |
	       1 << PG_reclaim |
	       1 << PG_waiters))) {
		dump_page(page, "fuse: trying to steal weird page");
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	get_page(oldpage);
	err = unlock_request(cs->req);
	if (err)
		goto out_put_old;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		goto out_put_old;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (!pipe_buf_try_steal(cs->pipe, buf))
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	replace_page_cache_page(oldpage, newpage);

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add(newpage);

	/*
	 * Release while we have extra ref on stolen page.  Otherwise
	 * anon_pipe_buf_release() might think the page can be reused.
	 */
	pipe_buf_release(cs->pipe, buf);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		goto out_put_old;
	}

	unlock_page(oldpage);
	/* Drop ref for ap->pages[] array */
	put_page(oldpage);
	cs->len = 0;

	err = 0;
out_put_old:
	/* Drop ref obtained in this function */
	put_page(oldpage);
	return err;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (!err)
		err = 1;

	goto out_put_old;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs >= cs->pipe->max_usage)
		return -EIO;

	get_page(page);
	err = unlock_request(cs->req);
	if (err) {
		put_page(page);
		return err;
	}

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}
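
/*
 * Together, the two helpers above implement the splice fast paths:
 * fuse_try_move_page() steals a full, well-behaved pipe page and
 * splices it into the page cache in place of the original (returning 1
 * to make the caller fall back to copying when that isn't possible),
 * while fuse_ref_page() goes the other direction and hands a request
 * page to the pipe by reference, avoiding a copy on device reads.
 */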

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			/*
			 * Can't control lifetime of pipe buffers, so always
			 * copy user pages.
			 */
			if (cs->req->args->user_pages) {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			} else {
				return fuse_ref_page(cs, page, offset, count);
			}
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_local_page(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_local(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);

	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned int offset = ap->descs[i].offset;
		unsigned int count = min(nbytes, ap->descs[i].length);

		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}
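
/*
 * Wire format note: the interrupt message assembled below is simply a
 * fuse_in_header followed by a fuse_interrupt_in.  ih.unique is the
 * interrupted request's ID with FUSE_INT_REQ_BIT set, and arg.unique
 * carries the original (even) ID that the server should cancel.
 */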

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
					     unsigned int max,
					     unsigned int *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
EXPORT_SYMBOL(fuse_dequeue_forget);

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->lock)
{
	int err;
	struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = fuse_dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}
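
/*
 * Layout note: a batch forget, as read by the server, is a
 * fuse_in_header, then a fuse_batch_forget_in carrying the count, then
 * that many fuse_forget_one records; ih.len is grown accordingly above.
 * The single-forget form is kept for servers that negotiated a protocol
 * minor version below 16 (see fuse_read_forget()).
 */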

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * fuse_request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_args *args;
	unsigned reqsize;
	unsigned int hash;

	/*
	 * Require sane minimum read buffer - that has capacity for fixed part
	 * of any request header + negotiated max_write room for data.
	 *
	 * Historically libfuse reserves 4K for fixed header room, but e.g.
	 * GlusterFS reserves only 80 bytes
	 *
	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
	 *
	 * which is the absolute minimum any sane filesystem should be using
	 * for header room.
	 */
	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
			   sizeof(struct fuse_in_header) +
			   sizeof(struct fuse_write_in) +
			   fc->max_write))
		return -EINVAL;

 restart:
	for (;;) {
		spin_lock(&fiq->lock);
		if (!fiq->connected || request_pending(fiq))
			break;
		spin_unlock(&fiq->lock);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = wait_event_interruptible_exclusive(fiq->waitq,
				!fiq->connected || request_pending(fiq));
		if (err)
			return err;
	}

	if (!fiq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto err_unlock;
	}

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->lock);

	args = req->args;
	reqsize = req->in.h.len;

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since its data may exceed the buffer */
		if (args->opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		fuse_request_end(req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	/*
	 * Must not put request on fpq->io queue after having been shut down by
	 * fuse_abort_conn()
	 */
	if (!fpq->connected) {
		req->out.h.error = err = -ECONNABORTED;
		goto out_end;
	}
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
	if (!err)
		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
				     (struct fuse_arg *) args->in_args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	hash = fuse_req_hash(req->in.h.unique);
	list_move_tail(&req->list, &fpq->processing[hash]);
	__fuse_get_request(req);
	set_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(req);
	fuse_put_request(req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	fuse_request_end(req);
	return err;

 err_unlock:
	spin_unlock(&fiq->lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}
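
/*
 * Note: private_data stays NULL until a connection is attached, either
 * by mount (see the comment in fuse_get_dev()) or by FUSE_DEV_IOC_CLONE
 * installing a fuse_dev in fuse_device_clone() below.  Every handler
 * below therefore starts by checking fuse_get_dev() for NULL and fails
 * with -EPERM on an unattached device file.
 */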

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kvfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = fuse_reverse_inval_inode(fc, outarg.ino,
				       outarg.off, outarg.len);
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}
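
/*
 * The two entry-invalidation notifications below share a payload
 * layout: the fixed outarg struct is followed by the entry name and a
 * trailing NUL, so the expected size is sizeof(outarg) + namelen + 1.
 * Names longer than FUSE_NAME_MAX are rejected with -ENAMETOOLONG
 * before any lookup is attempted.
 */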

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_attr(inode, file_size, outarg.size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

struct fuse_retrieve_args {
	struct fuse_args_pages ap;
	struct fuse_notify_retrieve_in inarg;
};

static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
			      int error)
{
	struct fuse_retrieve_args *ra =
		container_of(args, typeof(*ra), ap.args);

	release_pages(ra->ap.pages, ra->ap.num_pages);
	kfree(ra);
}

static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	unsigned int num_pages;
	struct fuse_conn *fc = fm->fc;
	struct fuse_retrieve_args *ra;
	size_t args_size = sizeof(*ra);
	struct fuse_args_pages *ap;
	struct fuse_args *args;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = min(outarg->size, fc->max_write);
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, fc->max_pages);

	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));

	ra = kzalloc(args_size, GFP_KERNEL);
	if (!ra)
		return -ENOMEM;

	ap = &ra->ap;
	ap->pages = (void *) (ra + 1);
	ap->descs = (void *) (ap->pages + num_pages);

	args = &ap->args;
	args->nodeid = outarg->nodeid;
	args->opcode = FUSE_NOTIFY_REPLY;
	args->in_numargs = 2;
	args->in_pages = true;
	args->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && ap->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].offset = offset;
		ap->descs[ap->num_pages].length = this_num;
		ap->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	ra->inarg.offset = outarg->offset;
	ra->inarg.size = total_len;
	args->in_args[0].size = sizeof(ra->inarg);
	args->in_args[0].value = &ra->inarg;
	args->in_args[1].size = total_len;

	err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fm, args, err);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct fuse_mount *fm;
	struct inode *inode;
	u64 nodeid;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	nodeid = outarg.nodeid;

	inode = fuse_ilookup(fc, nodeid, &fm);
	if (inode) {
		err = fuse_retrieve(fm, inode, &outarg);
		iput(inode);
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}
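
/*
 * Dispatch an unsolicited notification from the server.  These arrive
 * through an ordinary write to the device with oh.unique == 0 (see
 * fuse_dev_do_write() below); the notification code is carried in the
 * header's error field and selects one of the handlers above.
 */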
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	unsigned int hash = fuse_req_hash(unique);
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing[hash], list) {
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	reqsize += fuse_len_args(args->out_numargs, args->out_args);

	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
		unsigned diffsize = reqsize - nbytes;

		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
			      args->out_args, args->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling fuse_request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	err = -EINVAL;
	if (nbytes < sizeof(struct fuse_out_header))
		goto out;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto copy_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto copy_finish;

	/*
	 * A zero oh.unique indicates an unsolicited notification message,
	 * in which case oh.error carries the notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		goto out;
	}

	err = -EINVAL;
	if (oh.error <= -512 || oh.error > 0)
		goto copy_finish;

	spin_lock(&fpq->lock);
	req = NULL;
	if (fpq->connected)
		req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);

	err = -ENOENT;
	if (!req) {
		spin_unlock(&fpq->lock);
		goto copy_finish;
	}
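
	/*
	 * From here on, a reply carrying FUSE_INT_REQ_BIT answers the
	 * interrupt message itself rather than the original request:
	 * -ENOSYS from the server disables interrupts for the connection,
	 * while -EAGAIN asks the kernel to queue the interrupt again.
	 */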
	/* Is it an interrupt reply ID? */
	if (oh.unique & FUSE_INT_REQ_BIT) {
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = 0;
		if (nbytes != sizeof(struct fuse_out_header))
			err = -EINVAL;
		else if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			err = queue_interrupt(req);

		fuse_put_request(req);

		goto copy_finish;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->args->page_replace)
		cs->move_pages = 0;

	if (oh.error)
		err = nbytes != sizeof(oh) ? -EINVAL : 0;
	else
		err = copy_out_args(cs, req->args, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
out:
	return err ? err : nbytes;

copy_finish:
	fuse_copy_finish(cs);
	goto out;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned int head, tail, mask, count;
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	pipe_lock(pipe);

	head = pipe->head;
	tail = pipe->tail;
	mask = pipe->ring_size - 1;
	count = head - tail;

	bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs) {
		pipe_unlock(pipe);
		return -ENOMEM;
	}

	nbuf = 0;
	rem = 0;
	for (idx = tail; idx != head && rem < len; idx++)
		rem += pipe->bufs[idx & mask].len;

	ret = -EINVAL;
	if (rem < len)
		goto out_free;

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		if (WARN_ON(nbuf >= count || tail == head))
			goto out_free;

		ibuf = &pipe->bufs[tail & mask];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			tail++;
			pipe->tail = tail;
		} else {
			if (!pipe_buf_get(pipe, ibuf))
				goto out_free;

			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	pipe_lock(pipe);
out_free:
	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];

		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	pipe_unlock(pipe);

	kvfree(bufs);
	return ret;
}
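
/*
 * Poll semantics for the device, as implemented below: the device is
 * always writable (replies can be delivered at any time), becomes
 * readable when a request, interrupt or forget is pending, and signals
 * EPOLLERR once the connection is no longer live.
 */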
static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return EPOLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->lock);
	if (!fiq->connected)
		mask = EPOLLERR;
	else if (request_pending(fiq))
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock(&fiq->lock);

	return mask;
}

/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		fuse_request_end(req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
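/*
 * A note for readers: besides the release of the last device file (see
 * fuse_dev_release() below), fuse_abort_conn() is exported and, outside
 * this file, also wired up to the fusectl filesystem's per-connection
 * 'abort' control file; both paths converge here.
 */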
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);
		unsigned int i;

		/* Background queuing checks fc->connected under bg_lock */
		spin_lock(&fc->bg_lock);
		fc->connected = 0;
		spin_unlock(&fc->bg_lock);

		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
				list_splice_tail_init(&fpq->processing[i],
						      &to_end);
			spin_unlock(&fpq->lock);
		}
		spin_lock(&fc->bg_lock);
		fc->blocked = 0;
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);

		spin_lock(&fiq->lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		while (forget_pending(fiq))
			kfree(fuse_dequeue_forget(fiq, 1, NULL));
		wake_up_all(&fiq->waitq);
		spin_unlock(&fiq->lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		end_requests(&to_end);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

void fuse_wait_aborted(struct fuse_conn *fc)
{
	/* matches implicit memory barrier in fuse_drop_waiting() */
	smp_mb();
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);
		unsigned int i;

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_init(&fpq->processing[i], &to_end);
		spin_unlock(&fpq->lock);

		end_requests(&to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc_install(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int res;
	int oldfd;
	struct fuse_dev *fud = NULL;

	switch (cmd) {
	case FUSE_DEV_IOC_CLONE:
		res = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *)arg)) {
			struct file *old = fget(oldfd);

			res = -EINVAL;
			if (old) {
				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					res = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
		break;
	default:
		res = -ENOTTY;
		break;
	}
	return res;
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}