/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return ACCESS_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}
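
/*
 * Allocate a request and account the caller as a waiter.
 *
 * fc->num_waiting is bumped before the wait so that an abort can see
 * that somebody is blocked here.  Until the FUSE_INIT reply arrives
 * (fc->initialized) every allocation blocks; background allocations
 * additionally block while fc->blocked is set, i.e. while
 * fc->max_background requests are already in flight.
 */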
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}
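
/*
 * Note on locking: fuse_get_unique() and queue_request() both rely on
 * fiq->waitq.lock being held by the caller, which is why
 * flush_bg_queue() takes it around the pair above.
 */
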
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		return;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}
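
/*
 * To summarize the waiting in request_wait_answer() above: it degrades
 * through three stages -- interruptible (any signal queues a
 * FUSE_INTERRUPT), killable (a fatal signal can still remove a request
 * that userspace has not read yet), and finally uninterruptible once
 * the request is in userspace or was forced.
 */
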
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
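
/*
 * Typical fuse_simple_request() usage (a sketch for illustration only;
 * the real callers live in dir.c, file.c, etc., and 'inarg'/'outarg'
 * here stand for the opcode-specific structs):
 *
 *	FUSE_ARGS(args);
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */
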
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}
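
/*
 * FR_LOCKED marks a request whose buffers are being copied to or from
 * userspace and therefore must not be freed: fuse_abort_conn() only
 * sets FR_ABORTED on locked requests and leaves finishing them to the
 * copier, which notices the flag via unlock_request() below.
 */
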
/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}
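
/*
 * The two functions below implement page stealing for the
 * SPLICE_F_MOVE write path: fuse_try_move_page() steals the page
 * backing a pipe buffer and installs it directly in the page cache,
 * while fuse_check_page() rejects pages whose state would make that
 * swap unsafe, in which case fuse_try_move_page() falls back to an
 * ordinary copy.
 */
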
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n",
		       page, page->index, page->flags, page_count(page),
		       page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}
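
/*
 * fuse_ref_page() above is the zero-copy read path: instead of copying
 * a request page into the pipe, the page itself is referenced by a
 * pipe buffer, so a splicing daemon never has to touch the data.
 */
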
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}
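
/*
 * Every message read from the device starts with a fuse_in_header
 * whose 'len' field covers the whole message.  The helpers below
 * assemble the special message types (interrupts and the two forget
 * variants) on the stack, without a backing fuse_req.
 */
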
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}
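
/*
 * A FUSE_BATCH_FORGET message is laid out as fuse_in_header +
 * fuse_batch_forget_in + N * fuse_forget_one, so the number of forgets
 * that fit is computed from the reader's buffer size before dequeuing.
 */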
static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}
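
/*
 * Scheduling note: fuse_dev_do_read() below normally prefers forgets
 * over pending requests, but fiq->forget_batch caps how many are
 * handed out in a row so that a flood of forgets cannot starve
 * regular requests.
 */
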
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}
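
/*
 * Kernel half of the splice read path.  A userspace server can avoid
 * copies with something like the following (daemon-side sketch, not
 * kernel code; 'out_fd' is wherever the data should end up):
 *
 *	splice(fuse_fd, NULL, pipe_wr, NULL, bufsize, 0);
 *	splice(pipe_rd, NULL, out_fd, NULL, bufsize, SPLICE_F_MOVE);
 */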
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
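
/*
 * Like FUSE_NOTIFY_INVAL_ENTRY, but the notification also carries the
 * child nodeid, which is passed down so that the entry can be treated
 * as deleted rather than merely stale (see fuse_reverse_inval_entry()
 * in dir.c).
 */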
static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}
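
/*
 * FUSE_NOTIFY_RETRIEVE works in the opposite direction from
 * FUSE_NOTIFY_STORE: the daemon asks for a range of an inode's cached
 * data, and the kernel answers by queuing a FUSE_NOTIFY_REPLY request
 * carrying whatever pages are found in the page cache.
 */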
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
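
/*
 * Note on 'argvar' handling in copy_out_args() above: it marks replies
 * whose last argument is variable length (read data, for example), so
 * a shorter-than-maximum reply shrinks that argument instead of being
 * rejected with -EINVAL.
 */
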
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_finish;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

err_unlock_pq:
	spin_unlock(&fpq->lock);
err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			pipe_buf_get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);

out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * Called without fc->lock held; request_end() takes it as needed.
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}
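
/*
 * Wake up all pollers registered in fc->polled_files.  After an abort
 * their next FUSE_POLL request fails, so they see an error instead of
 * sleeping on a dead connection.
 */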
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					list_move(&req->list, &to_end1);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_init(&fpq->processing, &to_end2);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		list_for_each_entry(req, &to_end2, list)
			clear_bit(FR_PENDING, &req->flags);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			__fuse_get_request(req);
			list_del_init(&req->list);
			request_end(fc, req);
		}
		end_requests(fc, &to_end2);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
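
/*
 * Besides the internal callers (such as the final device release
 * below), fuse_abort_conn() is what the sysfs abort control invokes:
 * writing to /sys/fs/fuse/connections/<id>/abort forces a hung
 * filesystem into this path.
 */
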
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;

		WARN_ON(!list_empty(&fpq->io));
		end_requests(fc, &fpq->processing);
		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}