/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

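/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a typical caller of fuse_get_req() elsewhere in fs/fuse follows
 * roughly this pattern; the opcode and argument layout below are only
 * an example:
 *
 *	req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->in.h.opcode = FUSE_GETATTR;	// any opcode
 *	req->in.h.nodeid = nodeid;
 *	req->out.numargs = 1;
 *	req->out.args[0].size = sizeof(outarg);
 *	req->out.args[0].value = &outarg;
 *	request_send(fc, req);			// waits for the reply
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */
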
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; this always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < FUSE_MAX_BACKGROUND &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}

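/*
 * Editor's note (summary, not part of the original file): background
 * requests are accounted in two counters.  fc->num_background counts
 * every queued background request and drives throttling: when it hits
 * FUSE_MAX_BACKGROUND, fc->blocked is set and fuse_get_req() callers
 * sleep; when it hits FUSE_CONGESTION_THRESHOLD, the bdi is marked
 * congested.  fc->active_background counts only those background
 * requests already moved from fc->bg_queue to fc->pending, and
 * flush_bg_queue() above tops it up as requests complete in
 * request_end() below.
 */
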
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
			clear_bdi_congested(&fc->bdi, READ);
			clear_bdi_congested(&fc->bdi, WRITE);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(fc->lock) __acquires(fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock) __acquires(fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

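/*
 * Editor's note (summary, not part of the original file):
 * request_wait_answer() above proceeds in up to three phases:
 *
 *   1. While INTERRUPT is not known to be unsupported, wait
 *      interruptibly; if any signal arrives, mark the request
 *      interrupted and queue a FUSE_INTERRUPT if it was already sent.
 *   2. Unless the request was forced, wait again with all signals
 *      except SIGKILL blocked; a still-pending request can be dequeued
 *      here and failed with -EINTR without involving userspace.
 *   3. Once the request has reached userspace (or was forced), wait
 *      uninterruptibly for the reply, since userspace now owns it.
 */
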
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

static void request_send_nowait_locked(struct fuse_conn *fc,
				       struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == FUSE_MAX_BACKGROUND)
		fc->blocked = 1;
	if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
		set_bdi_congested(&fc->bdi, READ);
		set_bdi_congested(&fc->bdi, WRITE);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

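/*
 * Editor's note (summary, not part of the original file): req->locked
 * marks the window in which the device reader/writer is copying data
 * directly to or from the request's buffers.  An abort cannot tear
 * such a request down immediately; it sets req->aborted and then, in
 * request_wait_answer() and end_io_requests(), waits for
 * unlock_request() to clear req->locked and wake req->waitq.
 */
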
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

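/*
 * Editor's note (summary, not part of the original file): the helpers
 * above stream data between request arguments and the daemon's iovec
 * one user page at a time.  fuse_copy_fill() pins the next page with
 * get_user_pages() and maps it with kmap_atomic(); fuse_copy_do()
 * memcpy()s between that mapping and the current argument; and
 * fuse_copy_finish() unmaps the page, dirtying it when the kernel was
 * the writer (cs->write).  The request stays locked across the copy so
 * an abort cannot free its buffers underneath the copy.
 */
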
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

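/*
 * Editor's note (summary, not part of the original file): request_wait()
 * is entered with fc->lock held and drops it only around schedule(), so
 * the pending/interrupt list checks stay consistent with the lock.  The
 * wait is registered exclusively, so queue_request()'s wake_up() wakes
 * only one of several daemon threads blocked in read().
 */
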
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
__releases(fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET), or the request has been
 * aborted, or there was an error during the copying, then it is
 * finished by calling request_end().  Otherwise add it to the
 * processing list and set the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

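/*
 * Editor's note (illustrative, not part of the original file): what the
 * daemon sees on each read() from the device is the byte stream built
 * above -- a struct fuse_in_header (whose 'len' covers the whole
 * message and whose 'unique' must be echoed back), followed by the
 * opcode-specific argument structs and, for writes, the data pages.
 * The reply it writes back (handled by fuse_dev_write() below) is a
 * struct fuse_out_header carrying the matching 'unique', followed by
 * the opcode-specific out-arguments.
 */
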
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

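/*
 * Editor's note (summary, not part of the original file): copy_out_args()
 * enforces the reply size rules.  An error reply must consist of the
 * fuse_out_header alone.  A successful reply must match the expected
 * argument sizes exactly, except that when out->argvar is set the last
 * argument may be shorter, in which case its size is trimmed to what
 * userspace actually sent.  Interrupt replies are recognized by
 * intr_unique: -ENOSYS disables interrupts for the connection and
 * -EAGAIN re-queues the interrupt.
 */
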
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(fc->lock) __acquires(fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

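/*
 * Editor's note (summary, not part of the original file): note the
 * locking pattern above -- request_end() drops fc->lock, so
 * end_requests() reacquires it before re-checking the list, and its
 * callers enter and leave with fc->lock held.  end_io_requests()
 * likewise drops the lock only around waiting for req->locked to clear
 * and running the 'end' callback.
 */
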
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = fuse_dev_read,
	.write = do_sync_write,
	.aio_write = fuse_dev_write,
	.poll = fuse_dev_poll,
	.release = fuse_dev_release,
	.fasync = fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}

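/*
 * Editor's note (usage sketch, not part of the original file): the
 * character device registered above is /dev/fuse (misc minor
 * FUSE_MINOR).  A userspace daemon typically open()s it, passes the
 * file descriptor to mount(2) via the "fd=N" option parsed in
 * fs/fuse/inode.c, and then loops read()ing requests and write()ing
 * replies through the fuse_dev_read()/fuse_dev_write() paths above.
 */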