1 /* 2 FUSE: Filesystem in Userspace 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> 4 5 This program can be distributed under the terms of the GNU GPL. 6 See the file COPYING. 7 */ 8 9 #include "fuse_i.h" 10 11 #include <linux/init.h> 12 #include <linux/module.h> 13 #include <linux/poll.h> 14 #include <linux/sched/signal.h> 15 #include <linux/uio.h> 16 #include <linux/miscdevice.h> 17 #include <linux/pagemap.h> 18 #include <linux/file.h> 19 #include <linux/slab.h> 20 #include <linux/pipe_fs_i.h> 21 #include <linux/swap.h> 22 #include <linux/splice.h> 23 #include <linux/sched.h> 24 25 MODULE_ALIAS_MISCDEV(FUSE_MINOR); 26 MODULE_ALIAS("devname:fuse"); 27 28 /* Ordinary requests have even IDs, while interrupts IDs are odd */ 29 #define FUSE_INT_REQ_BIT (1ULL << 0) 30 #define FUSE_REQ_ID_STEP (1ULL << 1) 31 32 static struct kmem_cache *fuse_req_cachep; 33 34 static struct fuse_dev *fuse_get_dev(struct file *file) 35 { 36 /* 37 * Lockless access is OK, because file->private data is set 38 * once during mount and is valid until the file is released. 39 */ 40 return READ_ONCE(file->private_data); 41 } 42 43 static void fuse_request_init(struct fuse_req *req) 44 { 45 INIT_LIST_HEAD(&req->list); 46 INIT_LIST_HEAD(&req->intr_entry); 47 init_waitqueue_head(&req->waitq); 48 refcount_set(&req->count, 1); 49 __set_bit(FR_PENDING, &req->flags); 50 } 51 52 static struct fuse_req *fuse_request_alloc(gfp_t flags) 53 { 54 struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags); 55 if (req) 56 fuse_request_init(req); 57 58 return req; 59 } 60 61 static void fuse_request_free(struct fuse_req *req) 62 { 63 kmem_cache_free(fuse_req_cachep, req); 64 } 65 66 static void __fuse_get_request(struct fuse_req *req) 67 { 68 refcount_inc(&req->count); 69 } 70 71 /* Must be called with > 1 refcount */ 72 static void __fuse_put_request(struct fuse_req *req) 73 { 74 refcount_dec(&req->count); 75 } 76 77 void fuse_set_initialized(struct fuse_conn *fc) 78 { 79 /* Make sure stores before this are seen on another CPU */ 80 smp_wmb(); 81 fc->initialized = 1; 82 } 83 84 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) 85 { 86 return !fc->initialized || (for_background && fc->blocked); 87 } 88 89 static void fuse_drop_waiting(struct fuse_conn *fc) 90 { 91 /* 92 * lockess check of fc->connected is okay, because atomic_dec_and_test() 93 * provides a memory barrier mached with the one in fuse_wait_aborted() 94 * to ensure no wake-up is missed. 95 */ 96 if (atomic_dec_and_test(&fc->num_waiting) && 97 !READ_ONCE(fc->connected)) { 98 /* wake up aborters */ 99 wake_up_all(&fc->blocked_waitq); 100 } 101 } 102 103 static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req); 104 105 static struct fuse_req *fuse_get_req(struct fuse_conn *fc, bool for_background) 106 { 107 struct fuse_req *req; 108 int err; 109 atomic_inc(&fc->num_waiting); 110 111 if (fuse_block_alloc(fc, for_background)) { 112 err = -EINTR; 113 if (wait_event_killable_exclusive(fc->blocked_waitq, 114 !fuse_block_alloc(fc, for_background))) 115 goto out; 116 } 117 /* Matches smp_wmb() in fuse_set_initialized() */ 118 smp_rmb(); 119 120 err = -ENOTCONN; 121 if (!fc->connected) 122 goto out; 123 124 err = -ECONNREFUSED; 125 if (fc->conn_error) 126 goto out; 127 128 req = fuse_request_alloc(GFP_KERNEL); 129 err = -ENOMEM; 130 if (!req) { 131 if (for_background) 132 wake_up(&fc->blocked_waitq); 133 goto out; 134 } 135 136 req->in.h.uid = from_kuid(fc->user_ns, current_fsuid()); 137 req->in.h.gid = from_kgid(fc->user_ns, current_fsgid()); 138 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); 139 140 __set_bit(FR_WAITING, &req->flags); 141 if (for_background) 142 __set_bit(FR_BACKGROUND, &req->flags); 143 144 if (unlikely(req->in.h.uid == ((uid_t)-1) || 145 req->in.h.gid == ((gid_t)-1))) { 146 fuse_put_request(fc, req); 147 return ERR_PTR(-EOVERFLOW); 148 } 149 return req; 150 151 out: 152 fuse_drop_waiting(fc); 153 return ERR_PTR(err); 154 } 155 156 static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) 157 { 158 if (refcount_dec_and_test(&req->count)) { 159 if (test_bit(FR_BACKGROUND, &req->flags)) { 160 /* 161 * We get here in the unlikely case that a background 162 * request was allocated but not sent 163 */ 164 spin_lock(&fc->bg_lock); 165 if (!fc->blocked) 166 wake_up(&fc->blocked_waitq); 167 spin_unlock(&fc->bg_lock); 168 } 169 170 if (test_bit(FR_WAITING, &req->flags)) { 171 __clear_bit(FR_WAITING, &req->flags); 172 fuse_drop_waiting(fc); 173 } 174 175 fuse_request_free(req); 176 } 177 } 178 179 unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args) 180 { 181 unsigned nbytes = 0; 182 unsigned i; 183 184 for (i = 0; i < numargs; i++) 185 nbytes += args[i].size; 186 187 return nbytes; 188 } 189 EXPORT_SYMBOL_GPL(fuse_len_args); 190 191 u64 fuse_get_unique(struct fuse_iqueue *fiq) 192 { 193 fiq->reqctr += FUSE_REQ_ID_STEP; 194 return fiq->reqctr; 195 } 196 EXPORT_SYMBOL_GPL(fuse_get_unique); 197 198 static unsigned int fuse_req_hash(u64 unique) 199 { 200 return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS); 201 } 202 203 /** 204 * A new request is available, wake fiq->waitq 205 */ 206 static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq) 207 __releases(fiq->lock) 208 { 209 wake_up(&fiq->waitq); 210 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); 211 spin_unlock(&fiq->lock); 212 } 213 214 const struct fuse_iqueue_ops fuse_dev_fiq_ops = { 215 .wake_forget_and_unlock = fuse_dev_wake_and_unlock, 216 .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock, 217 .wake_pending_and_unlock = fuse_dev_wake_and_unlock, 218 }; 219 EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops); 220 221 static void queue_request_and_unlock(struct fuse_iqueue *fiq, 222 struct fuse_req *req) 223 __releases(fiq->lock) 224 { 225 req->in.h.len = sizeof(struct fuse_in_header) + 226 fuse_len_args(req->args->in_numargs, 227 (struct fuse_arg *) req->args->in_args); 228 list_add_tail(&req->list, &fiq->pending); 229 fiq->ops->wake_pending_and_unlock(fiq); 230 } 231 232 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, 233 u64 nodeid, u64 nlookup) 234 { 235 struct fuse_iqueue *fiq = &fc->iq; 236 237 forget->forget_one.nodeid = nodeid; 238 forget->forget_one.nlookup = nlookup; 239 240 spin_lock(&fiq->lock); 241 if (fiq->connected) { 242 fiq->forget_list_tail->next = forget; 243 fiq->forget_list_tail = forget; 244 fiq->ops->wake_forget_and_unlock(fiq); 245 } else { 246 kfree(forget); 247 spin_unlock(&fiq->lock); 248 } 249 } 250 251 static void flush_bg_queue(struct fuse_conn *fc) 252 { 253 struct fuse_iqueue *fiq = &fc->iq; 254 255 while (fc->active_background < fc->max_background && 256 !list_empty(&fc->bg_queue)) { 257 struct fuse_req *req; 258 259 req = list_first_entry(&fc->bg_queue, struct fuse_req, list); 260 list_del(&req->list); 261 fc->active_background++; 262 spin_lock(&fiq->lock); 263 req->in.h.unique = fuse_get_unique(fiq); 264 queue_request_and_unlock(fiq, req); 265 } 266 } 267 268 /* 269 * This function is called when a request is finished. Either a reply 270 * has arrived or it was aborted (and not yet sent) or some error 271 * occurred during communication with userspace, or the device file 272 * was closed. The requester thread is woken up (if still waiting), 273 * the 'end' callback is called if given, else the reference to the 274 * request is released 275 */ 276 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req) 277 { 278 struct fuse_iqueue *fiq = &fc->iq; 279 280 if (test_and_set_bit(FR_FINISHED, &req->flags)) 281 goto put_request; 282 283 /* 284 * test_and_set_bit() implies smp_mb() between bit 285 * changing and below intr_entry check. Pairs with 286 * smp_mb() from queue_interrupt(). 287 */ 288 if (!list_empty(&req->intr_entry)) { 289 spin_lock(&fiq->lock); 290 list_del_init(&req->intr_entry); 291 spin_unlock(&fiq->lock); 292 } 293 WARN_ON(test_bit(FR_PENDING, &req->flags)); 294 WARN_ON(test_bit(FR_SENT, &req->flags)); 295 if (test_bit(FR_BACKGROUND, &req->flags)) { 296 spin_lock(&fc->bg_lock); 297 clear_bit(FR_BACKGROUND, &req->flags); 298 if (fc->num_background == fc->max_background) { 299 fc->blocked = 0; 300 wake_up(&fc->blocked_waitq); 301 } else if (!fc->blocked) { 302 /* 303 * Wake up next waiter, if any. It's okay to use 304 * waitqueue_active(), as we've already synced up 305 * fc->blocked with waiters with the wake_up() call 306 * above. 307 */ 308 if (waitqueue_active(&fc->blocked_waitq)) 309 wake_up(&fc->blocked_waitq); 310 } 311 312 if (fc->num_background == fc->congestion_threshold && fc->sb) { 313 clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); 314 clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); 315 } 316 fc->num_background--; 317 fc->active_background--; 318 flush_bg_queue(fc); 319 spin_unlock(&fc->bg_lock); 320 } else { 321 /* Wake up waiter sleeping in request_wait_answer() */ 322 wake_up(&req->waitq); 323 } 324 325 if (test_bit(FR_ASYNC, &req->flags)) 326 req->args->end(fc, req->args, req->out.h.error); 327 put_request: 328 fuse_put_request(fc, req); 329 } 330 EXPORT_SYMBOL_GPL(fuse_request_end); 331 332 static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) 333 { 334 spin_lock(&fiq->lock); 335 /* Check for we've sent request to interrupt this req */ 336 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) { 337 spin_unlock(&fiq->lock); 338 return -EINVAL; 339 } 340 341 if (list_empty(&req->intr_entry)) { 342 list_add_tail(&req->intr_entry, &fiq->interrupts); 343 /* 344 * Pairs with smp_mb() implied by test_and_set_bit() 345 * from fuse_request_end(). 346 */ 347 smp_mb(); 348 if (test_bit(FR_FINISHED, &req->flags)) { 349 list_del_init(&req->intr_entry); 350 spin_unlock(&fiq->lock); 351 return 0; 352 } 353 fiq->ops->wake_interrupt_and_unlock(fiq); 354 } else { 355 spin_unlock(&fiq->lock); 356 } 357 return 0; 358 } 359 360 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) 361 { 362 struct fuse_iqueue *fiq = &fc->iq; 363 int err; 364 365 if (!fc->no_interrupt) { 366 /* Any signal may interrupt this */ 367 err = wait_event_interruptible(req->waitq, 368 test_bit(FR_FINISHED, &req->flags)); 369 if (!err) 370 return; 371 372 set_bit(FR_INTERRUPTED, &req->flags); 373 /* matches barrier in fuse_dev_do_read() */ 374 smp_mb__after_atomic(); 375 if (test_bit(FR_SENT, &req->flags)) 376 queue_interrupt(fiq, req); 377 } 378 379 if (!test_bit(FR_FORCE, &req->flags)) { 380 /* Only fatal signals may interrupt this */ 381 err = wait_event_killable(req->waitq, 382 test_bit(FR_FINISHED, &req->flags)); 383 if (!err) 384 return; 385 386 spin_lock(&fiq->lock); 387 /* Request is not yet in userspace, bail out */ 388 if (test_bit(FR_PENDING, &req->flags)) { 389 list_del(&req->list); 390 spin_unlock(&fiq->lock); 391 __fuse_put_request(req); 392 req->out.h.error = -EINTR; 393 return; 394 } 395 spin_unlock(&fiq->lock); 396 } 397 398 /* 399 * Either request is already in userspace, or it was forced. 400 * Wait it out. 401 */ 402 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags)); 403 } 404 405 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) 406 { 407 struct fuse_iqueue *fiq = &fc->iq; 408 409 BUG_ON(test_bit(FR_BACKGROUND, &req->flags)); 410 spin_lock(&fiq->lock); 411 if (!fiq->connected) { 412 spin_unlock(&fiq->lock); 413 req->out.h.error = -ENOTCONN; 414 } else { 415 req->in.h.unique = fuse_get_unique(fiq); 416 /* acquire extra reference, since request is still needed 417 after fuse_request_end() */ 418 __fuse_get_request(req); 419 queue_request_and_unlock(fiq, req); 420 421 request_wait_answer(fc, req); 422 /* Pairs with smp_wmb() in fuse_request_end() */ 423 smp_rmb(); 424 } 425 } 426 427 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) 428 { 429 if (fc->minor < 4 && args->opcode == FUSE_STATFS) 430 args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE; 431 432 if (fc->minor < 9) { 433 switch (args->opcode) { 434 case FUSE_LOOKUP: 435 case FUSE_CREATE: 436 case FUSE_MKNOD: 437 case FUSE_MKDIR: 438 case FUSE_SYMLINK: 439 case FUSE_LINK: 440 args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 441 break; 442 case FUSE_GETATTR: 443 case FUSE_SETATTR: 444 args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; 445 break; 446 } 447 } 448 if (fc->minor < 12) { 449 switch (args->opcode) { 450 case FUSE_CREATE: 451 args->in_args[0].size = sizeof(struct fuse_open_in); 452 break; 453 case FUSE_MKNOD: 454 args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE; 455 break; 456 } 457 } 458 } 459 460 static void fuse_force_creds(struct fuse_conn *fc, struct fuse_req *req) 461 { 462 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid()); 463 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid()); 464 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); 465 } 466 467 static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args) 468 { 469 req->in.h.opcode = args->opcode; 470 req->in.h.nodeid = args->nodeid; 471 req->args = args; 472 if (args->end) 473 __set_bit(FR_ASYNC, &req->flags); 474 } 475 476 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) 477 { 478 struct fuse_req *req; 479 ssize_t ret; 480 481 if (args->force) { 482 atomic_inc(&fc->num_waiting); 483 req = fuse_request_alloc(GFP_KERNEL | __GFP_NOFAIL); 484 485 if (!args->nocreds) 486 fuse_force_creds(fc, req); 487 488 __set_bit(FR_WAITING, &req->flags); 489 __set_bit(FR_FORCE, &req->flags); 490 } else { 491 WARN_ON(args->nocreds); 492 req = fuse_get_req(fc, false); 493 if (IS_ERR(req)) 494 return PTR_ERR(req); 495 } 496 497 /* Needs to be done after fuse_get_req() so that fc->minor is valid */ 498 fuse_adjust_compat(fc, args); 499 fuse_args_to_req(req, args); 500 501 if (!args->noreply) 502 __set_bit(FR_ISREPLY, &req->flags); 503 __fuse_request_send(fc, req); 504 ret = req->out.h.error; 505 if (!ret && args->out_argvar) { 506 BUG_ON(args->out_numargs == 0); 507 ret = args->out_args[args->out_numargs - 1].size; 508 } 509 fuse_put_request(fc, req); 510 511 return ret; 512 } 513 514 static bool fuse_request_queue_background(struct fuse_conn *fc, 515 struct fuse_req *req) 516 { 517 bool queued = false; 518 519 WARN_ON(!test_bit(FR_BACKGROUND, &req->flags)); 520 if (!test_bit(FR_WAITING, &req->flags)) { 521 __set_bit(FR_WAITING, &req->flags); 522 atomic_inc(&fc->num_waiting); 523 } 524 __set_bit(FR_ISREPLY, &req->flags); 525 spin_lock(&fc->bg_lock); 526 if (likely(fc->connected)) { 527 fc->num_background++; 528 if (fc->num_background == fc->max_background) 529 fc->blocked = 1; 530 if (fc->num_background == fc->congestion_threshold && fc->sb) { 531 set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); 532 set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); 533 } 534 list_add_tail(&req->list, &fc->bg_queue); 535 flush_bg_queue(fc); 536 queued = true; 537 } 538 spin_unlock(&fc->bg_lock); 539 540 return queued; 541 } 542 543 int fuse_simple_background(struct fuse_conn *fc, struct fuse_args *args, 544 gfp_t gfp_flags) 545 { 546 struct fuse_req *req; 547 548 if (args->force) { 549 WARN_ON(!args->nocreds); 550 req = fuse_request_alloc(gfp_flags); 551 if (!req) 552 return -ENOMEM; 553 __set_bit(FR_BACKGROUND, &req->flags); 554 } else { 555 WARN_ON(args->nocreds); 556 req = fuse_get_req(fc, true); 557 if (IS_ERR(req)) 558 return PTR_ERR(req); 559 } 560 561 fuse_args_to_req(req, args); 562 563 if (!fuse_request_queue_background(fc, req)) { 564 fuse_put_request(fc, req); 565 return -ENOTCONN; 566 } 567 568 return 0; 569 } 570 EXPORT_SYMBOL_GPL(fuse_simple_background); 571 572 static int fuse_simple_notify_reply(struct fuse_conn *fc, 573 struct fuse_args *args, u64 unique) 574 { 575 struct fuse_req *req; 576 struct fuse_iqueue *fiq = &fc->iq; 577 int err = 0; 578 579 req = fuse_get_req(fc, false); 580 if (IS_ERR(req)) 581 return PTR_ERR(req); 582 583 __clear_bit(FR_ISREPLY, &req->flags); 584 req->in.h.unique = unique; 585 586 fuse_args_to_req(req, args); 587 588 spin_lock(&fiq->lock); 589 if (fiq->connected) { 590 queue_request_and_unlock(fiq, req); 591 } else { 592 err = -ENODEV; 593 spin_unlock(&fiq->lock); 594 fuse_put_request(fc, req); 595 } 596 597 return err; 598 } 599 600 /* 601 * Lock the request. Up to the next unlock_request() there mustn't be 602 * anything that could cause a page-fault. If the request was already 603 * aborted bail out. 604 */ 605 static int lock_request(struct fuse_req *req) 606 { 607 int err = 0; 608 if (req) { 609 spin_lock(&req->waitq.lock); 610 if (test_bit(FR_ABORTED, &req->flags)) 611 err = -ENOENT; 612 else 613 set_bit(FR_LOCKED, &req->flags); 614 spin_unlock(&req->waitq.lock); 615 } 616 return err; 617 } 618 619 /* 620 * Unlock request. If it was aborted while locked, caller is responsible 621 * for unlocking and ending the request. 622 */ 623 static int unlock_request(struct fuse_req *req) 624 { 625 int err = 0; 626 if (req) { 627 spin_lock(&req->waitq.lock); 628 if (test_bit(FR_ABORTED, &req->flags)) 629 err = -ENOENT; 630 else 631 clear_bit(FR_LOCKED, &req->flags); 632 spin_unlock(&req->waitq.lock); 633 } 634 return err; 635 } 636 637 struct fuse_copy_state { 638 int write; 639 struct fuse_req *req; 640 struct iov_iter *iter; 641 struct pipe_buffer *pipebufs; 642 struct pipe_buffer *currbuf; 643 struct pipe_inode_info *pipe; 644 unsigned long nr_segs; 645 struct page *pg; 646 unsigned len; 647 unsigned offset; 648 unsigned move_pages:1; 649 }; 650 651 static void fuse_copy_init(struct fuse_copy_state *cs, int write, 652 struct iov_iter *iter) 653 { 654 memset(cs, 0, sizeof(*cs)); 655 cs->write = write; 656 cs->iter = iter; 657 } 658 659 /* Unmap and put previous page of userspace buffer */ 660 static void fuse_copy_finish(struct fuse_copy_state *cs) 661 { 662 if (cs->currbuf) { 663 struct pipe_buffer *buf = cs->currbuf; 664 665 if (cs->write) 666 buf->len = PAGE_SIZE - cs->len; 667 cs->currbuf = NULL; 668 } else if (cs->pg) { 669 if (cs->write) { 670 flush_dcache_page(cs->pg); 671 set_page_dirty_lock(cs->pg); 672 } 673 put_page(cs->pg); 674 } 675 cs->pg = NULL; 676 } 677 678 /* 679 * Get another pagefull of userspace buffer, and map it to kernel 680 * address space, and lock request 681 */ 682 static int fuse_copy_fill(struct fuse_copy_state *cs) 683 { 684 struct page *page; 685 int err; 686 687 err = unlock_request(cs->req); 688 if (err) 689 return err; 690 691 fuse_copy_finish(cs); 692 if (cs->pipebufs) { 693 struct pipe_buffer *buf = cs->pipebufs; 694 695 if (!cs->write) { 696 err = pipe_buf_confirm(cs->pipe, buf); 697 if (err) 698 return err; 699 700 BUG_ON(!cs->nr_segs); 701 cs->currbuf = buf; 702 cs->pg = buf->page; 703 cs->offset = buf->offset; 704 cs->len = buf->len; 705 cs->pipebufs++; 706 cs->nr_segs--; 707 } else { 708 if (cs->nr_segs >= cs->pipe->max_usage) 709 return -EIO; 710 711 page = alloc_page(GFP_HIGHUSER); 712 if (!page) 713 return -ENOMEM; 714 715 buf->page = page; 716 buf->offset = 0; 717 buf->len = 0; 718 719 cs->currbuf = buf; 720 cs->pg = page; 721 cs->offset = 0; 722 cs->len = PAGE_SIZE; 723 cs->pipebufs++; 724 cs->nr_segs++; 725 } 726 } else { 727 size_t off; 728 err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off); 729 if (err < 0) 730 return err; 731 BUG_ON(!err); 732 cs->len = err; 733 cs->offset = off; 734 cs->pg = page; 735 iov_iter_advance(cs->iter, err); 736 } 737 738 return lock_request(cs->req); 739 } 740 741 /* Do as much copy to/from userspace buffer as we can */ 742 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) 743 { 744 unsigned ncpy = min(*size, cs->len); 745 if (val) { 746 void *pgaddr = kmap_atomic(cs->pg); 747 void *buf = pgaddr + cs->offset; 748 749 if (cs->write) 750 memcpy(buf, *val, ncpy); 751 else 752 memcpy(*val, buf, ncpy); 753 754 kunmap_atomic(pgaddr); 755 *val += ncpy; 756 } 757 *size -= ncpy; 758 cs->len -= ncpy; 759 cs->offset += ncpy; 760 return ncpy; 761 } 762 763 static int fuse_check_page(struct page *page) 764 { 765 if (page_mapcount(page) || 766 page->mapping != NULL || 767 (page->flags & PAGE_FLAGS_CHECK_AT_PREP & 768 ~(1 << PG_locked | 769 1 << PG_referenced | 770 1 << PG_uptodate | 771 1 << PG_lru | 772 1 << PG_active | 773 1 << PG_reclaim | 774 1 << PG_waiters))) { 775 dump_page(page, "fuse: trying to steal weird page"); 776 return 1; 777 } 778 return 0; 779 } 780 781 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) 782 { 783 int err; 784 struct page *oldpage = *pagep; 785 struct page *newpage; 786 struct pipe_buffer *buf = cs->pipebufs; 787 788 get_page(oldpage); 789 err = unlock_request(cs->req); 790 if (err) 791 goto out_put_old; 792 793 fuse_copy_finish(cs); 794 795 err = pipe_buf_confirm(cs->pipe, buf); 796 if (err) 797 goto out_put_old; 798 799 BUG_ON(!cs->nr_segs); 800 cs->currbuf = buf; 801 cs->len = buf->len; 802 cs->pipebufs++; 803 cs->nr_segs--; 804 805 if (cs->len != PAGE_SIZE) 806 goto out_fallback; 807 808 if (!pipe_buf_try_steal(cs->pipe, buf)) 809 goto out_fallback; 810 811 newpage = buf->page; 812 813 if (!PageUptodate(newpage)) 814 SetPageUptodate(newpage); 815 816 ClearPageMappedToDisk(newpage); 817 818 if (fuse_check_page(newpage) != 0) 819 goto out_fallback_unlock; 820 821 /* 822 * This is a new and locked page, it shouldn't be mapped or 823 * have any special flags on it 824 */ 825 if (WARN_ON(page_mapped(oldpage))) 826 goto out_fallback_unlock; 827 if (WARN_ON(page_has_private(oldpage))) 828 goto out_fallback_unlock; 829 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage))) 830 goto out_fallback_unlock; 831 if (WARN_ON(PageMlocked(oldpage))) 832 goto out_fallback_unlock; 833 834 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL); 835 if (err) { 836 unlock_page(newpage); 837 goto out_put_old; 838 } 839 840 get_page(newpage); 841 842 if (!(buf->flags & PIPE_BUF_FLAG_LRU)) 843 lru_cache_add(newpage); 844 845 err = 0; 846 spin_lock(&cs->req->waitq.lock); 847 if (test_bit(FR_ABORTED, &cs->req->flags)) 848 err = -ENOENT; 849 else 850 *pagep = newpage; 851 spin_unlock(&cs->req->waitq.lock); 852 853 if (err) { 854 unlock_page(newpage); 855 put_page(newpage); 856 goto out_put_old; 857 } 858 859 unlock_page(oldpage); 860 /* Drop ref for ap->pages[] array */ 861 put_page(oldpage); 862 cs->len = 0; 863 864 err = 0; 865 out_put_old: 866 /* Drop ref obtained in this function */ 867 put_page(oldpage); 868 return err; 869 870 out_fallback_unlock: 871 unlock_page(newpage); 872 out_fallback: 873 cs->pg = buf->page; 874 cs->offset = buf->offset; 875 876 err = lock_request(cs->req); 877 if (!err) 878 err = 1; 879 880 goto out_put_old; 881 } 882 883 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, 884 unsigned offset, unsigned count) 885 { 886 struct pipe_buffer *buf; 887 int err; 888 889 if (cs->nr_segs >= cs->pipe->max_usage) 890 return -EIO; 891 892 get_page(page); 893 err = unlock_request(cs->req); 894 if (err) { 895 put_page(page); 896 return err; 897 } 898 899 fuse_copy_finish(cs); 900 901 buf = cs->pipebufs; 902 buf->page = page; 903 buf->offset = offset; 904 buf->len = count; 905 906 cs->pipebufs++; 907 cs->nr_segs++; 908 cs->len = 0; 909 910 return 0; 911 } 912 913 /* 914 * Copy a page in the request to/from the userspace buffer. Must be 915 * done atomically 916 */ 917 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, 918 unsigned offset, unsigned count, int zeroing) 919 { 920 int err; 921 struct page *page = *pagep; 922 923 if (page && zeroing && count < PAGE_SIZE) 924 clear_highpage(page); 925 926 while (count) { 927 if (cs->write && cs->pipebufs && page) { 928 return fuse_ref_page(cs, page, offset, count); 929 } else if (!cs->len) { 930 if (cs->move_pages && page && 931 offset == 0 && count == PAGE_SIZE) { 932 err = fuse_try_move_page(cs, pagep); 933 if (err <= 0) 934 return err; 935 } else { 936 err = fuse_copy_fill(cs); 937 if (err) 938 return err; 939 } 940 } 941 if (page) { 942 void *mapaddr = kmap_atomic(page); 943 void *buf = mapaddr + offset; 944 offset += fuse_copy_do(cs, &buf, &count); 945 kunmap_atomic(mapaddr); 946 } else 947 offset += fuse_copy_do(cs, NULL, &count); 948 } 949 if (page && !cs->write) 950 flush_dcache_page(page); 951 return 0; 952 } 953 954 /* Copy pages in the request to/from userspace buffer */ 955 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, 956 int zeroing) 957 { 958 unsigned i; 959 struct fuse_req *req = cs->req; 960 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args); 961 962 963 for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) { 964 int err; 965 unsigned int offset = ap->descs[i].offset; 966 unsigned int count = min(nbytes, ap->descs[i].length); 967 968 err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing); 969 if (err) 970 return err; 971 972 nbytes -= count; 973 } 974 return 0; 975 } 976 977 /* Copy a single argument in the request to/from userspace buffer */ 978 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size) 979 { 980 while (size) { 981 if (!cs->len) { 982 int err = fuse_copy_fill(cs); 983 if (err) 984 return err; 985 } 986 fuse_copy_do(cs, &val, &size); 987 } 988 return 0; 989 } 990 991 /* Copy request arguments to/from userspace buffer */ 992 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, 993 unsigned argpages, struct fuse_arg *args, 994 int zeroing) 995 { 996 int err = 0; 997 unsigned i; 998 999 for (i = 0; !err && i < numargs; i++) { 1000 struct fuse_arg *arg = &args[i]; 1001 if (i == numargs - 1 && argpages) 1002 err = fuse_copy_pages(cs, arg->size, zeroing); 1003 else 1004 err = fuse_copy_one(cs, arg->value, arg->size); 1005 } 1006 return err; 1007 } 1008 1009 static int forget_pending(struct fuse_iqueue *fiq) 1010 { 1011 return fiq->forget_list_head.next != NULL; 1012 } 1013 1014 static int request_pending(struct fuse_iqueue *fiq) 1015 { 1016 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) || 1017 forget_pending(fiq); 1018 } 1019 1020 /* 1021 * Transfer an interrupt request to userspace 1022 * 1023 * Unlike other requests this is assembled on demand, without a need 1024 * to allocate a separate fuse_req structure. 1025 * 1026 * Called with fiq->lock held, releases it 1027 */ 1028 static int fuse_read_interrupt(struct fuse_iqueue *fiq, 1029 struct fuse_copy_state *cs, 1030 size_t nbytes, struct fuse_req *req) 1031 __releases(fiq->lock) 1032 { 1033 struct fuse_in_header ih; 1034 struct fuse_interrupt_in arg; 1035 unsigned reqsize = sizeof(ih) + sizeof(arg); 1036 int err; 1037 1038 list_del_init(&req->intr_entry); 1039 memset(&ih, 0, sizeof(ih)); 1040 memset(&arg, 0, sizeof(arg)); 1041 ih.len = reqsize; 1042 ih.opcode = FUSE_INTERRUPT; 1043 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT); 1044 arg.unique = req->in.h.unique; 1045 1046 spin_unlock(&fiq->lock); 1047 if (nbytes < reqsize) 1048 return -EINVAL; 1049 1050 err = fuse_copy_one(cs, &ih, sizeof(ih)); 1051 if (!err) 1052 err = fuse_copy_one(cs, &arg, sizeof(arg)); 1053 fuse_copy_finish(cs); 1054 1055 return err ? err : reqsize; 1056 } 1057 1058 struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, 1059 unsigned int max, 1060 unsigned int *countp) 1061 { 1062 struct fuse_forget_link *head = fiq->forget_list_head.next; 1063 struct fuse_forget_link **newhead = &head; 1064 unsigned count; 1065 1066 for (count = 0; *newhead != NULL && count < max; count++) 1067 newhead = &(*newhead)->next; 1068 1069 fiq->forget_list_head.next = *newhead; 1070 *newhead = NULL; 1071 if (fiq->forget_list_head.next == NULL) 1072 fiq->forget_list_tail = &fiq->forget_list_head; 1073 1074 if (countp != NULL) 1075 *countp = count; 1076 1077 return head; 1078 } 1079 EXPORT_SYMBOL(fuse_dequeue_forget); 1080 1081 static int fuse_read_single_forget(struct fuse_iqueue *fiq, 1082 struct fuse_copy_state *cs, 1083 size_t nbytes) 1084 __releases(fiq->lock) 1085 { 1086 int err; 1087 struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL); 1088 struct fuse_forget_in arg = { 1089 .nlookup = forget->forget_one.nlookup, 1090 }; 1091 struct fuse_in_header ih = { 1092 .opcode = FUSE_FORGET, 1093 .nodeid = forget->forget_one.nodeid, 1094 .unique = fuse_get_unique(fiq), 1095 .len = sizeof(ih) + sizeof(arg), 1096 }; 1097 1098 spin_unlock(&fiq->lock); 1099 kfree(forget); 1100 if (nbytes < ih.len) 1101 return -EINVAL; 1102 1103 err = fuse_copy_one(cs, &ih, sizeof(ih)); 1104 if (!err) 1105 err = fuse_copy_one(cs, &arg, sizeof(arg)); 1106 fuse_copy_finish(cs); 1107 1108 if (err) 1109 return err; 1110 1111 return ih.len; 1112 } 1113 1114 static int fuse_read_batch_forget(struct fuse_iqueue *fiq, 1115 struct fuse_copy_state *cs, size_t nbytes) 1116 __releases(fiq->lock) 1117 { 1118 int err; 1119 unsigned max_forgets; 1120 unsigned count; 1121 struct fuse_forget_link *head; 1122 struct fuse_batch_forget_in arg = { .count = 0 }; 1123 struct fuse_in_header ih = { 1124 .opcode = FUSE_BATCH_FORGET, 1125 .unique = fuse_get_unique(fiq), 1126 .len = sizeof(ih) + sizeof(arg), 1127 }; 1128 1129 if (nbytes < ih.len) { 1130 spin_unlock(&fiq->lock); 1131 return -EINVAL; 1132 } 1133 1134 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one); 1135 head = fuse_dequeue_forget(fiq, max_forgets, &count); 1136 spin_unlock(&fiq->lock); 1137 1138 arg.count = count; 1139 ih.len += count * sizeof(struct fuse_forget_one); 1140 err = fuse_copy_one(cs, &ih, sizeof(ih)); 1141 if (!err) 1142 err = fuse_copy_one(cs, &arg, sizeof(arg)); 1143 1144 while (head) { 1145 struct fuse_forget_link *forget = head; 1146 1147 if (!err) { 1148 err = fuse_copy_one(cs, &forget->forget_one, 1149 sizeof(forget->forget_one)); 1150 } 1151 head = forget->next; 1152 kfree(forget); 1153 } 1154 1155 fuse_copy_finish(cs); 1156 1157 if (err) 1158 return err; 1159 1160 return ih.len; 1161 } 1162 1163 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, 1164 struct fuse_copy_state *cs, 1165 size_t nbytes) 1166 __releases(fiq->lock) 1167 { 1168 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) 1169 return fuse_read_single_forget(fiq, cs, nbytes); 1170 else 1171 return fuse_read_batch_forget(fiq, cs, nbytes); 1172 } 1173 1174 /* 1175 * Read a single request into the userspace filesystem's buffer. This 1176 * function waits until a request is available, then removes it from 1177 * the pending list and copies request data to userspace buffer. If 1178 * no reply is needed (FORGET) or request has been aborted or there 1179 * was an error during the copying then it's finished by calling 1180 * fuse_request_end(). Otherwise add it to the processing list, and set 1181 * the 'sent' flag. 1182 */ 1183 static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, 1184 struct fuse_copy_state *cs, size_t nbytes) 1185 { 1186 ssize_t err; 1187 struct fuse_conn *fc = fud->fc; 1188 struct fuse_iqueue *fiq = &fc->iq; 1189 struct fuse_pqueue *fpq = &fud->pq; 1190 struct fuse_req *req; 1191 struct fuse_args *args; 1192 unsigned reqsize; 1193 unsigned int hash; 1194 1195 /* 1196 * Require sane minimum read buffer - that has capacity for fixed part 1197 * of any request header + negotiated max_write room for data. 1198 * 1199 * Historically libfuse reserves 4K for fixed header room, but e.g. 1200 * GlusterFS reserves only 80 bytes 1201 * 1202 * = `sizeof(fuse_in_header) + sizeof(fuse_write_in)` 1203 * 1204 * which is the absolute minimum any sane filesystem should be using 1205 * for header room. 1206 */ 1207 if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER, 1208 sizeof(struct fuse_in_header) + 1209 sizeof(struct fuse_write_in) + 1210 fc->max_write)) 1211 return -EINVAL; 1212 1213 restart: 1214 for (;;) { 1215 spin_lock(&fiq->lock); 1216 if (!fiq->connected || request_pending(fiq)) 1217 break; 1218 spin_unlock(&fiq->lock); 1219 1220 if (file->f_flags & O_NONBLOCK) 1221 return -EAGAIN; 1222 err = wait_event_interruptible_exclusive(fiq->waitq, 1223 !fiq->connected || request_pending(fiq)); 1224 if (err) 1225 return err; 1226 } 1227 1228 if (!fiq->connected) { 1229 err = fc->aborted ? -ECONNABORTED : -ENODEV; 1230 goto err_unlock; 1231 } 1232 1233 if (!list_empty(&fiq->interrupts)) { 1234 req = list_entry(fiq->interrupts.next, struct fuse_req, 1235 intr_entry); 1236 return fuse_read_interrupt(fiq, cs, nbytes, req); 1237 } 1238 1239 if (forget_pending(fiq)) { 1240 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0) 1241 return fuse_read_forget(fc, fiq, cs, nbytes); 1242 1243 if (fiq->forget_batch <= -8) 1244 fiq->forget_batch = 16; 1245 } 1246 1247 req = list_entry(fiq->pending.next, struct fuse_req, list); 1248 clear_bit(FR_PENDING, &req->flags); 1249 list_del_init(&req->list); 1250 spin_unlock(&fiq->lock); 1251 1252 args = req->args; 1253 reqsize = req->in.h.len; 1254 1255 /* If request is too large, reply with an error and restart the read */ 1256 if (nbytes < reqsize) { 1257 req->out.h.error = -EIO; 1258 /* SETXATTR is special, since it may contain too large data */ 1259 if (args->opcode == FUSE_SETXATTR) 1260 req->out.h.error = -E2BIG; 1261 fuse_request_end(fc, req); 1262 goto restart; 1263 } 1264 spin_lock(&fpq->lock); 1265 list_add(&req->list, &fpq->io); 1266 spin_unlock(&fpq->lock); 1267 cs->req = req; 1268 err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h)); 1269 if (!err) 1270 err = fuse_copy_args(cs, args->in_numargs, args->in_pages, 1271 (struct fuse_arg *) args->in_args, 0); 1272 fuse_copy_finish(cs); 1273 spin_lock(&fpq->lock); 1274 clear_bit(FR_LOCKED, &req->flags); 1275 if (!fpq->connected) { 1276 err = fc->aborted ? -ECONNABORTED : -ENODEV; 1277 goto out_end; 1278 } 1279 if (err) { 1280 req->out.h.error = -EIO; 1281 goto out_end; 1282 } 1283 if (!test_bit(FR_ISREPLY, &req->flags)) { 1284 err = reqsize; 1285 goto out_end; 1286 } 1287 hash = fuse_req_hash(req->in.h.unique); 1288 list_move_tail(&req->list, &fpq->processing[hash]); 1289 __fuse_get_request(req); 1290 set_bit(FR_SENT, &req->flags); 1291 spin_unlock(&fpq->lock); 1292 /* matches barrier in request_wait_answer() */ 1293 smp_mb__after_atomic(); 1294 if (test_bit(FR_INTERRUPTED, &req->flags)) 1295 queue_interrupt(fiq, req); 1296 fuse_put_request(fc, req); 1297 1298 return reqsize; 1299 1300 out_end: 1301 if (!test_bit(FR_PRIVATE, &req->flags)) 1302 list_del_init(&req->list); 1303 spin_unlock(&fpq->lock); 1304 fuse_request_end(fc, req); 1305 return err; 1306 1307 err_unlock: 1308 spin_unlock(&fiq->lock); 1309 return err; 1310 } 1311 1312 static int fuse_dev_open(struct inode *inode, struct file *file) 1313 { 1314 /* 1315 * The fuse device's file's private_data is used to hold 1316 * the fuse_conn(ection) when it is mounted, and is used to 1317 * keep track of whether the file has been mounted already. 1318 */ 1319 file->private_data = NULL; 1320 return 0; 1321 } 1322 1323 static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to) 1324 { 1325 struct fuse_copy_state cs; 1326 struct file *file = iocb->ki_filp; 1327 struct fuse_dev *fud = fuse_get_dev(file); 1328 1329 if (!fud) 1330 return -EPERM; 1331 1332 if (!iter_is_iovec(to)) 1333 return -EINVAL; 1334 1335 fuse_copy_init(&cs, 1, to); 1336 1337 return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to)); 1338 } 1339 1340 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, 1341 struct pipe_inode_info *pipe, 1342 size_t len, unsigned int flags) 1343 { 1344 int total, ret; 1345 int page_nr = 0; 1346 struct pipe_buffer *bufs; 1347 struct fuse_copy_state cs; 1348 struct fuse_dev *fud = fuse_get_dev(in); 1349 1350 if (!fud) 1351 return -EPERM; 1352 1353 bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer), 1354 GFP_KERNEL); 1355 if (!bufs) 1356 return -ENOMEM; 1357 1358 fuse_copy_init(&cs, 1, NULL); 1359 cs.pipebufs = bufs; 1360 cs.pipe = pipe; 1361 ret = fuse_dev_do_read(fud, in, &cs, len); 1362 if (ret < 0) 1363 goto out; 1364 1365 if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) { 1366 ret = -EIO; 1367 goto out; 1368 } 1369 1370 for (ret = total = 0; page_nr < cs.nr_segs; total += ret) { 1371 /* 1372 * Need to be careful about this. Having buf->ops in module 1373 * code can Oops if the buffer persists after module unload. 1374 */ 1375 bufs[page_nr].ops = &nosteal_pipe_buf_ops; 1376 bufs[page_nr].flags = 0; 1377 ret = add_to_pipe(pipe, &bufs[page_nr++]); 1378 if (unlikely(ret < 0)) 1379 break; 1380 } 1381 if (total) 1382 ret = total; 1383 out: 1384 for (; page_nr < cs.nr_segs; page_nr++) 1385 put_page(bufs[page_nr].page); 1386 1387 kvfree(bufs); 1388 return ret; 1389 } 1390 1391 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, 1392 struct fuse_copy_state *cs) 1393 { 1394 struct fuse_notify_poll_wakeup_out outarg; 1395 int err = -EINVAL; 1396 1397 if (size != sizeof(outarg)) 1398 goto err; 1399 1400 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1401 if (err) 1402 goto err; 1403 1404 fuse_copy_finish(cs); 1405 return fuse_notify_poll_wakeup(fc, &outarg); 1406 1407 err: 1408 fuse_copy_finish(cs); 1409 return err; 1410 } 1411 1412 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, 1413 struct fuse_copy_state *cs) 1414 { 1415 struct fuse_notify_inval_inode_out outarg; 1416 int err = -EINVAL; 1417 1418 if (size != sizeof(outarg)) 1419 goto err; 1420 1421 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1422 if (err) 1423 goto err; 1424 fuse_copy_finish(cs); 1425 1426 down_read(&fc->killsb); 1427 err = -ENOENT; 1428 if (fc->sb) { 1429 err = fuse_reverse_inval_inode(fc->sb, outarg.ino, 1430 outarg.off, outarg.len); 1431 } 1432 up_read(&fc->killsb); 1433 return err; 1434 1435 err: 1436 fuse_copy_finish(cs); 1437 return err; 1438 } 1439 1440 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, 1441 struct fuse_copy_state *cs) 1442 { 1443 struct fuse_notify_inval_entry_out outarg; 1444 int err = -ENOMEM; 1445 char *buf; 1446 struct qstr name; 1447 1448 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL); 1449 if (!buf) 1450 goto err; 1451 1452 err = -EINVAL; 1453 if (size < sizeof(outarg)) 1454 goto err; 1455 1456 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1457 if (err) 1458 goto err; 1459 1460 err = -ENAMETOOLONG; 1461 if (outarg.namelen > FUSE_NAME_MAX) 1462 goto err; 1463 1464 err = -EINVAL; 1465 if (size != sizeof(outarg) + outarg.namelen + 1) 1466 goto err; 1467 1468 name.name = buf; 1469 name.len = outarg.namelen; 1470 err = fuse_copy_one(cs, buf, outarg.namelen + 1); 1471 if (err) 1472 goto err; 1473 fuse_copy_finish(cs); 1474 buf[outarg.namelen] = 0; 1475 1476 down_read(&fc->killsb); 1477 err = -ENOENT; 1478 if (fc->sb) 1479 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name); 1480 up_read(&fc->killsb); 1481 kfree(buf); 1482 return err; 1483 1484 err: 1485 kfree(buf); 1486 fuse_copy_finish(cs); 1487 return err; 1488 } 1489 1490 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size, 1491 struct fuse_copy_state *cs) 1492 { 1493 struct fuse_notify_delete_out outarg; 1494 int err = -ENOMEM; 1495 char *buf; 1496 struct qstr name; 1497 1498 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL); 1499 if (!buf) 1500 goto err; 1501 1502 err = -EINVAL; 1503 if (size < sizeof(outarg)) 1504 goto err; 1505 1506 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1507 if (err) 1508 goto err; 1509 1510 err = -ENAMETOOLONG; 1511 if (outarg.namelen > FUSE_NAME_MAX) 1512 goto err; 1513 1514 err = -EINVAL; 1515 if (size != sizeof(outarg) + outarg.namelen + 1) 1516 goto err; 1517 1518 name.name = buf; 1519 name.len = outarg.namelen; 1520 err = fuse_copy_one(cs, buf, outarg.namelen + 1); 1521 if (err) 1522 goto err; 1523 fuse_copy_finish(cs); 1524 buf[outarg.namelen] = 0; 1525 1526 down_read(&fc->killsb); 1527 err = -ENOENT; 1528 if (fc->sb) 1529 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 1530 outarg.child, &name); 1531 up_read(&fc->killsb); 1532 kfree(buf); 1533 return err; 1534 1535 err: 1536 kfree(buf); 1537 fuse_copy_finish(cs); 1538 return err; 1539 } 1540 1541 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, 1542 struct fuse_copy_state *cs) 1543 { 1544 struct fuse_notify_store_out outarg; 1545 struct inode *inode; 1546 struct address_space *mapping; 1547 u64 nodeid; 1548 int err; 1549 pgoff_t index; 1550 unsigned int offset; 1551 unsigned int num; 1552 loff_t file_size; 1553 loff_t end; 1554 1555 err = -EINVAL; 1556 if (size < sizeof(outarg)) 1557 goto out_finish; 1558 1559 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1560 if (err) 1561 goto out_finish; 1562 1563 err = -EINVAL; 1564 if (size - sizeof(outarg) != outarg.size) 1565 goto out_finish; 1566 1567 nodeid = outarg.nodeid; 1568 1569 down_read(&fc->killsb); 1570 1571 err = -ENOENT; 1572 if (!fc->sb) 1573 goto out_up_killsb; 1574 1575 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); 1576 if (!inode) 1577 goto out_up_killsb; 1578 1579 mapping = inode->i_mapping; 1580 index = outarg.offset >> PAGE_SHIFT; 1581 offset = outarg.offset & ~PAGE_MASK; 1582 file_size = i_size_read(inode); 1583 end = outarg.offset + outarg.size; 1584 if (end > file_size) { 1585 file_size = end; 1586 fuse_write_update_size(inode, file_size); 1587 } 1588 1589 num = outarg.size; 1590 while (num) { 1591 struct page *page; 1592 unsigned int this_num; 1593 1594 err = -ENOMEM; 1595 page = find_or_create_page(mapping, index, 1596 mapping_gfp_mask(mapping)); 1597 if (!page) 1598 goto out_iput; 1599 1600 this_num = min_t(unsigned, num, PAGE_SIZE - offset); 1601 err = fuse_copy_page(cs, &page, offset, this_num, 0); 1602 if (!err && offset == 0 && 1603 (this_num == PAGE_SIZE || file_size == end)) 1604 SetPageUptodate(page); 1605 unlock_page(page); 1606 put_page(page); 1607 1608 if (err) 1609 goto out_iput; 1610 1611 num -= this_num; 1612 offset = 0; 1613 index++; 1614 } 1615 1616 err = 0; 1617 1618 out_iput: 1619 iput(inode); 1620 out_up_killsb: 1621 up_read(&fc->killsb); 1622 out_finish: 1623 fuse_copy_finish(cs); 1624 return err; 1625 } 1626 1627 struct fuse_retrieve_args { 1628 struct fuse_args_pages ap; 1629 struct fuse_notify_retrieve_in inarg; 1630 }; 1631 1632 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_args *args, 1633 int error) 1634 { 1635 struct fuse_retrieve_args *ra = 1636 container_of(args, typeof(*ra), ap.args); 1637 1638 release_pages(ra->ap.pages, ra->ap.num_pages); 1639 kfree(ra); 1640 } 1641 1642 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, 1643 struct fuse_notify_retrieve_out *outarg) 1644 { 1645 int err; 1646 struct address_space *mapping = inode->i_mapping; 1647 pgoff_t index; 1648 loff_t file_size; 1649 unsigned int num; 1650 unsigned int offset; 1651 size_t total_len = 0; 1652 unsigned int num_pages; 1653 struct fuse_retrieve_args *ra; 1654 size_t args_size = sizeof(*ra); 1655 struct fuse_args_pages *ap; 1656 struct fuse_args *args; 1657 1658 offset = outarg->offset & ~PAGE_MASK; 1659 file_size = i_size_read(inode); 1660 1661 num = min(outarg->size, fc->max_write); 1662 if (outarg->offset > file_size) 1663 num = 0; 1664 else if (outarg->offset + num > file_size) 1665 num = file_size - outarg->offset; 1666 1667 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 1668 num_pages = min(num_pages, fc->max_pages); 1669 1670 args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0])); 1671 1672 ra = kzalloc(args_size, GFP_KERNEL); 1673 if (!ra) 1674 return -ENOMEM; 1675 1676 ap = &ra->ap; 1677 ap->pages = (void *) (ra + 1); 1678 ap->descs = (void *) (ap->pages + num_pages); 1679 1680 args = &ap->args; 1681 args->nodeid = outarg->nodeid; 1682 args->opcode = FUSE_NOTIFY_REPLY; 1683 args->in_numargs = 2; 1684 args->in_pages = true; 1685 args->end = fuse_retrieve_end; 1686 1687 index = outarg->offset >> PAGE_SHIFT; 1688 1689 while (num && ap->num_pages < num_pages) { 1690 struct page *page; 1691 unsigned int this_num; 1692 1693 page = find_get_page(mapping, index); 1694 if (!page) 1695 break; 1696 1697 this_num = min_t(unsigned, num, PAGE_SIZE - offset); 1698 ap->pages[ap->num_pages] = page; 1699 ap->descs[ap->num_pages].offset = offset; 1700 ap->descs[ap->num_pages].length = this_num; 1701 ap->num_pages++; 1702 1703 offset = 0; 1704 num -= this_num; 1705 total_len += this_num; 1706 index++; 1707 } 1708 ra->inarg.offset = outarg->offset; 1709 ra->inarg.size = total_len; 1710 args->in_args[0].size = sizeof(ra->inarg); 1711 args->in_args[0].value = &ra->inarg; 1712 args->in_args[1].size = total_len; 1713 1714 err = fuse_simple_notify_reply(fc, args, outarg->notify_unique); 1715 if (err) 1716 fuse_retrieve_end(fc, args, err); 1717 1718 return err; 1719 } 1720 1721 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, 1722 struct fuse_copy_state *cs) 1723 { 1724 struct fuse_notify_retrieve_out outarg; 1725 struct inode *inode; 1726 int err; 1727 1728 err = -EINVAL; 1729 if (size != sizeof(outarg)) 1730 goto copy_finish; 1731 1732 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1733 if (err) 1734 goto copy_finish; 1735 1736 fuse_copy_finish(cs); 1737 1738 down_read(&fc->killsb); 1739 err = -ENOENT; 1740 if (fc->sb) { 1741 u64 nodeid = outarg.nodeid; 1742 1743 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); 1744 if (inode) { 1745 err = fuse_retrieve(fc, inode, &outarg); 1746 iput(inode); 1747 } 1748 } 1749 up_read(&fc->killsb); 1750 1751 return err; 1752 1753 copy_finish: 1754 fuse_copy_finish(cs); 1755 return err; 1756 } 1757 1758 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, 1759 unsigned int size, struct fuse_copy_state *cs) 1760 { 1761 /* Don't try to move pages (yet) */ 1762 cs->move_pages = 0; 1763 1764 switch (code) { 1765 case FUSE_NOTIFY_POLL: 1766 return fuse_notify_poll(fc, size, cs); 1767 1768 case FUSE_NOTIFY_INVAL_INODE: 1769 return fuse_notify_inval_inode(fc, size, cs); 1770 1771 case FUSE_NOTIFY_INVAL_ENTRY: 1772 return fuse_notify_inval_entry(fc, size, cs); 1773 1774 case FUSE_NOTIFY_STORE: 1775 return fuse_notify_store(fc, size, cs); 1776 1777 case FUSE_NOTIFY_RETRIEVE: 1778 return fuse_notify_retrieve(fc, size, cs); 1779 1780 case FUSE_NOTIFY_DELETE: 1781 return fuse_notify_delete(fc, size, cs); 1782 1783 default: 1784 fuse_copy_finish(cs); 1785 return -EINVAL; 1786 } 1787 } 1788 1789 /* Look up request on processing list by unique ID */ 1790 static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique) 1791 { 1792 unsigned int hash = fuse_req_hash(unique); 1793 struct fuse_req *req; 1794 1795 list_for_each_entry(req, &fpq->processing[hash], list) { 1796 if (req->in.h.unique == unique) 1797 return req; 1798 } 1799 return NULL; 1800 } 1801 1802 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args, 1803 unsigned nbytes) 1804 { 1805 unsigned reqsize = sizeof(struct fuse_out_header); 1806 1807 reqsize += fuse_len_args(args->out_numargs, args->out_args); 1808 1809 if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar)) 1810 return -EINVAL; 1811 else if (reqsize > nbytes) { 1812 struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1]; 1813 unsigned diffsize = reqsize - nbytes; 1814 1815 if (diffsize > lastarg->size) 1816 return -EINVAL; 1817 lastarg->size -= diffsize; 1818 } 1819 return fuse_copy_args(cs, args->out_numargs, args->out_pages, 1820 args->out_args, args->page_zeroing); 1821 } 1822 1823 /* 1824 * Write a single reply to a request. First the header is copied from 1825 * the write buffer. The request is then searched on the processing 1826 * list by the unique ID found in the header. If found, then remove 1827 * it from the list and copy the rest of the buffer to the request. 1828 * The request is finished by calling fuse_request_end(). 1829 */ 1830 static ssize_t fuse_dev_do_write(struct fuse_dev *fud, 1831 struct fuse_copy_state *cs, size_t nbytes) 1832 { 1833 int err; 1834 struct fuse_conn *fc = fud->fc; 1835 struct fuse_pqueue *fpq = &fud->pq; 1836 struct fuse_req *req; 1837 struct fuse_out_header oh; 1838 1839 err = -EINVAL; 1840 if (nbytes < sizeof(struct fuse_out_header)) 1841 goto out; 1842 1843 err = fuse_copy_one(cs, &oh, sizeof(oh)); 1844 if (err) 1845 goto copy_finish; 1846 1847 err = -EINVAL; 1848 if (oh.len != nbytes) 1849 goto copy_finish; 1850 1851 /* 1852 * Zero oh.unique indicates unsolicited notification message 1853 * and error contains notification code. 1854 */ 1855 if (!oh.unique) { 1856 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); 1857 goto out; 1858 } 1859 1860 err = -EINVAL; 1861 if (oh.error <= -1000 || oh.error > 0) 1862 goto copy_finish; 1863 1864 spin_lock(&fpq->lock); 1865 req = NULL; 1866 if (fpq->connected) 1867 req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); 1868 1869 err = -ENOENT; 1870 if (!req) { 1871 spin_unlock(&fpq->lock); 1872 goto copy_finish; 1873 } 1874 1875 /* Is it an interrupt reply ID? */ 1876 if (oh.unique & FUSE_INT_REQ_BIT) { 1877 __fuse_get_request(req); 1878 spin_unlock(&fpq->lock); 1879 1880 err = 0; 1881 if (nbytes != sizeof(struct fuse_out_header)) 1882 err = -EINVAL; 1883 else if (oh.error == -ENOSYS) 1884 fc->no_interrupt = 1; 1885 else if (oh.error == -EAGAIN) 1886 err = queue_interrupt(&fc->iq, req); 1887 1888 fuse_put_request(fc, req); 1889 1890 goto copy_finish; 1891 } 1892 1893 clear_bit(FR_SENT, &req->flags); 1894 list_move(&req->list, &fpq->io); 1895 req->out.h = oh; 1896 set_bit(FR_LOCKED, &req->flags); 1897 spin_unlock(&fpq->lock); 1898 cs->req = req; 1899 if (!req->args->page_replace) 1900 cs->move_pages = 0; 1901 1902 if (oh.error) 1903 err = nbytes != sizeof(oh) ? -EINVAL : 0; 1904 else 1905 err = copy_out_args(cs, req->args, nbytes); 1906 fuse_copy_finish(cs); 1907 1908 spin_lock(&fpq->lock); 1909 clear_bit(FR_LOCKED, &req->flags); 1910 if (!fpq->connected) 1911 err = -ENOENT; 1912 else if (err) 1913 req->out.h.error = -EIO; 1914 if (!test_bit(FR_PRIVATE, &req->flags)) 1915 list_del_init(&req->list); 1916 spin_unlock(&fpq->lock); 1917 1918 fuse_request_end(fc, req); 1919 out: 1920 return err ? err : nbytes; 1921 1922 copy_finish: 1923 fuse_copy_finish(cs); 1924 goto out; 1925 } 1926 1927 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from) 1928 { 1929 struct fuse_copy_state cs; 1930 struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp); 1931 1932 if (!fud) 1933 return -EPERM; 1934 1935 if (!iter_is_iovec(from)) 1936 return -EINVAL; 1937 1938 fuse_copy_init(&cs, 0, from); 1939 1940 return fuse_dev_do_write(fud, &cs, iov_iter_count(from)); 1941 } 1942 1943 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, 1944 struct file *out, loff_t *ppos, 1945 size_t len, unsigned int flags) 1946 { 1947 unsigned int head, tail, mask, count; 1948 unsigned nbuf; 1949 unsigned idx; 1950 struct pipe_buffer *bufs; 1951 struct fuse_copy_state cs; 1952 struct fuse_dev *fud; 1953 size_t rem; 1954 ssize_t ret; 1955 1956 fud = fuse_get_dev(out); 1957 if (!fud) 1958 return -EPERM; 1959 1960 pipe_lock(pipe); 1961 1962 head = pipe->head; 1963 tail = pipe->tail; 1964 mask = pipe->ring_size - 1; 1965 count = head - tail; 1966 1967 bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL); 1968 if (!bufs) { 1969 pipe_unlock(pipe); 1970 return -ENOMEM; 1971 } 1972 1973 nbuf = 0; 1974 rem = 0; 1975 for (idx = tail; idx != head && rem < len; idx++) 1976 rem += pipe->bufs[idx & mask].len; 1977 1978 ret = -EINVAL; 1979 if (rem < len) 1980 goto out_free; 1981 1982 rem = len; 1983 while (rem) { 1984 struct pipe_buffer *ibuf; 1985 struct pipe_buffer *obuf; 1986 1987 if (WARN_ON(nbuf >= count || tail == head)) 1988 goto out_free; 1989 1990 ibuf = &pipe->bufs[tail & mask]; 1991 obuf = &bufs[nbuf]; 1992 1993 if (rem >= ibuf->len) { 1994 *obuf = *ibuf; 1995 ibuf->ops = NULL; 1996 tail++; 1997 pipe->tail = tail; 1998 } else { 1999 if (!pipe_buf_get(pipe, ibuf)) 2000 goto out_free; 2001 2002 *obuf = *ibuf; 2003 obuf->flags &= ~PIPE_BUF_FLAG_GIFT; 2004 obuf->len = rem; 2005 ibuf->offset += obuf->len; 2006 ibuf->len -= obuf->len; 2007 } 2008 nbuf++; 2009 rem -= obuf->len; 2010 } 2011 pipe_unlock(pipe); 2012 2013 fuse_copy_init(&cs, 0, NULL); 2014 cs.pipebufs = bufs; 2015 cs.nr_segs = nbuf; 2016 cs.pipe = pipe; 2017 2018 if (flags & SPLICE_F_MOVE) 2019 cs.move_pages = 1; 2020 2021 ret = fuse_dev_do_write(fud, &cs, len); 2022 2023 pipe_lock(pipe); 2024 out_free: 2025 for (idx = 0; idx < nbuf; idx++) 2026 pipe_buf_release(pipe, &bufs[idx]); 2027 pipe_unlock(pipe); 2028 2029 kvfree(bufs); 2030 return ret; 2031 } 2032 2033 static __poll_t fuse_dev_poll(struct file *file, poll_table *wait) 2034 { 2035 __poll_t mask = EPOLLOUT | EPOLLWRNORM; 2036 struct fuse_iqueue *fiq; 2037 struct fuse_dev *fud = fuse_get_dev(file); 2038 2039 if (!fud) 2040 return EPOLLERR; 2041 2042 fiq = &fud->fc->iq; 2043 poll_wait(file, &fiq->waitq, wait); 2044 2045 spin_lock(&fiq->lock); 2046 if (!fiq->connected) 2047 mask = EPOLLERR; 2048 else if (request_pending(fiq)) 2049 mask |= EPOLLIN | EPOLLRDNORM; 2050 spin_unlock(&fiq->lock); 2051 2052 return mask; 2053 } 2054 2055 /* Abort all requests on the given list (pending or processing) */ 2056 static void end_requests(struct fuse_conn *fc, struct list_head *head) 2057 { 2058 while (!list_empty(head)) { 2059 struct fuse_req *req; 2060 req = list_entry(head->next, struct fuse_req, list); 2061 req->out.h.error = -ECONNABORTED; 2062 clear_bit(FR_SENT, &req->flags); 2063 list_del_init(&req->list); 2064 fuse_request_end(fc, req); 2065 } 2066 } 2067 2068 static void end_polls(struct fuse_conn *fc) 2069 { 2070 struct rb_node *p; 2071 2072 p = rb_first(&fc->polled_files); 2073 2074 while (p) { 2075 struct fuse_file *ff; 2076 ff = rb_entry(p, struct fuse_file, polled_node); 2077 wake_up_interruptible_all(&ff->poll_wait); 2078 2079 p = rb_next(p); 2080 } 2081 } 2082 2083 /* 2084 * Abort all requests. 2085 * 2086 * Emergency exit in case of a malicious or accidental deadlock, or just a hung 2087 * filesystem. 2088 * 2089 * The same effect is usually achievable through killing the filesystem daemon 2090 * and all users of the filesystem. The exception is the combination of an 2091 * asynchronous request and the tricky deadlock (see 2092 * Documentation/filesystems/fuse.rst). 2093 * 2094 * Aborting requests under I/O goes as follows: 1: Separate out unlocked 2095 * requests, they should be finished off immediately. Locked requests will be 2096 * finished after unlock; see unlock_request(). 2: Finish off the unlocked 2097 * requests. It is possible that some request will finish before we can. This 2098 * is OK, the request will in that case be removed from the list before we touch 2099 * it. 2100 */ 2101 void fuse_abort_conn(struct fuse_conn *fc) 2102 { 2103 struct fuse_iqueue *fiq = &fc->iq; 2104 2105 spin_lock(&fc->lock); 2106 if (fc->connected) { 2107 struct fuse_dev *fud; 2108 struct fuse_req *req, *next; 2109 LIST_HEAD(to_end); 2110 unsigned int i; 2111 2112 /* Background queuing checks fc->connected under bg_lock */ 2113 spin_lock(&fc->bg_lock); 2114 fc->connected = 0; 2115 spin_unlock(&fc->bg_lock); 2116 2117 fuse_set_initialized(fc); 2118 list_for_each_entry(fud, &fc->devices, entry) { 2119 struct fuse_pqueue *fpq = &fud->pq; 2120 2121 spin_lock(&fpq->lock); 2122 fpq->connected = 0; 2123 list_for_each_entry_safe(req, next, &fpq->io, list) { 2124 req->out.h.error = -ECONNABORTED; 2125 spin_lock(&req->waitq.lock); 2126 set_bit(FR_ABORTED, &req->flags); 2127 if (!test_bit(FR_LOCKED, &req->flags)) { 2128 set_bit(FR_PRIVATE, &req->flags); 2129 __fuse_get_request(req); 2130 list_move(&req->list, &to_end); 2131 } 2132 spin_unlock(&req->waitq.lock); 2133 } 2134 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) 2135 list_splice_tail_init(&fpq->processing[i], 2136 &to_end); 2137 spin_unlock(&fpq->lock); 2138 } 2139 spin_lock(&fc->bg_lock); 2140 fc->blocked = 0; 2141 fc->max_background = UINT_MAX; 2142 flush_bg_queue(fc); 2143 spin_unlock(&fc->bg_lock); 2144 2145 spin_lock(&fiq->lock); 2146 fiq->connected = 0; 2147 list_for_each_entry(req, &fiq->pending, list) 2148 clear_bit(FR_PENDING, &req->flags); 2149 list_splice_tail_init(&fiq->pending, &to_end); 2150 while (forget_pending(fiq)) 2151 kfree(fuse_dequeue_forget(fiq, 1, NULL)); 2152 wake_up_all(&fiq->waitq); 2153 spin_unlock(&fiq->lock); 2154 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); 2155 end_polls(fc); 2156 wake_up_all(&fc->blocked_waitq); 2157 spin_unlock(&fc->lock); 2158 2159 end_requests(fc, &to_end); 2160 } else { 2161 spin_unlock(&fc->lock); 2162 } 2163 } 2164 EXPORT_SYMBOL_GPL(fuse_abort_conn); 2165 2166 void fuse_wait_aborted(struct fuse_conn *fc) 2167 { 2168 /* matches implicit memory barrier in fuse_drop_waiting() */ 2169 smp_mb(); 2170 wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); 2171 } 2172 2173 int fuse_dev_release(struct inode *inode, struct file *file) 2174 { 2175 struct fuse_dev *fud = fuse_get_dev(file); 2176 2177 if (fud) { 2178 struct fuse_conn *fc = fud->fc; 2179 struct fuse_pqueue *fpq = &fud->pq; 2180 LIST_HEAD(to_end); 2181 unsigned int i; 2182 2183 spin_lock(&fpq->lock); 2184 WARN_ON(!list_empty(&fpq->io)); 2185 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) 2186 list_splice_init(&fpq->processing[i], &to_end); 2187 spin_unlock(&fpq->lock); 2188 2189 end_requests(fc, &to_end); 2190 2191 /* Are we the last open device? */ 2192 if (atomic_dec_and_test(&fc->dev_count)) { 2193 WARN_ON(fc->iq.fasync != NULL); 2194 fuse_abort_conn(fc); 2195 } 2196 fuse_dev_free(fud); 2197 } 2198 return 0; 2199 } 2200 EXPORT_SYMBOL_GPL(fuse_dev_release); 2201 2202 static int fuse_dev_fasync(int fd, struct file *file, int on) 2203 { 2204 struct fuse_dev *fud = fuse_get_dev(file); 2205 2206 if (!fud) 2207 return -EPERM; 2208 2209 /* No locking - fasync_helper does its own locking */ 2210 return fasync_helper(fd, file, on, &fud->fc->iq.fasync); 2211 } 2212 2213 static int fuse_device_clone(struct fuse_conn *fc, struct file *new) 2214 { 2215 struct fuse_dev *fud; 2216 2217 if (new->private_data) 2218 return -EINVAL; 2219 2220 fud = fuse_dev_alloc_install(fc); 2221 if (!fud) 2222 return -ENOMEM; 2223 2224 new->private_data = fud; 2225 atomic_inc(&fc->dev_count); 2226 2227 return 0; 2228 } 2229 2230 static long fuse_dev_ioctl(struct file *file, unsigned int cmd, 2231 unsigned long arg) 2232 { 2233 int err = -ENOTTY; 2234 2235 if (cmd == FUSE_DEV_IOC_CLONE) { 2236 int oldfd; 2237 2238 err = -EFAULT; 2239 if (!get_user(oldfd, (__u32 __user *) arg)) { 2240 struct file *old = fget(oldfd); 2241 2242 err = -EINVAL; 2243 if (old) { 2244 struct fuse_dev *fud = NULL; 2245 2246 /* 2247 * Check against file->f_op because CUSE 2248 * uses the same ioctl handler. 2249 */ 2250 if (old->f_op == file->f_op && 2251 old->f_cred->user_ns == file->f_cred->user_ns) 2252 fud = fuse_get_dev(old); 2253 2254 if (fud) { 2255 mutex_lock(&fuse_mutex); 2256 err = fuse_device_clone(fud->fc, file); 2257 mutex_unlock(&fuse_mutex); 2258 } 2259 fput(old); 2260 } 2261 } 2262 } 2263 return err; 2264 } 2265 2266 const struct file_operations fuse_dev_operations = { 2267 .owner = THIS_MODULE, 2268 .open = fuse_dev_open, 2269 .llseek = no_llseek, 2270 .read_iter = fuse_dev_read, 2271 .splice_read = fuse_dev_splice_read, 2272 .write_iter = fuse_dev_write, 2273 .splice_write = fuse_dev_splice_write, 2274 .poll = fuse_dev_poll, 2275 .release = fuse_dev_release, 2276 .fasync = fuse_dev_fasync, 2277 .unlocked_ioctl = fuse_dev_ioctl, 2278 .compat_ioctl = compat_ptr_ioctl, 2279 }; 2280 EXPORT_SYMBOL_GPL(fuse_dev_operations); 2281 2282 static struct miscdevice fuse_miscdevice = { 2283 .minor = FUSE_MINOR, 2284 .name = "fuse", 2285 .fops = &fuse_dev_operations, 2286 }; 2287 2288 int __init fuse_dev_init(void) 2289 { 2290 int err = -ENOMEM; 2291 fuse_req_cachep = kmem_cache_create("fuse_request", 2292 sizeof(struct fuse_req), 2293 0, 0, NULL); 2294 if (!fuse_req_cachep) 2295 goto out; 2296 2297 err = misc_register(&fuse_miscdevice); 2298 if (err) 2299 goto out_cache_clean; 2300 2301 return 0; 2302 2303 out_cache_clean: 2304 kmem_cache_destroy(fuse_req_cachep); 2305 out: 2306 return err; 2307 } 2308 2309 void fuse_dev_cleanup(void) 2310 { 2311 misc_deregister(&fuse_miscdevice); 2312 kmem_cache_destroy(fuse_req_cachep); 2313 } 2314