/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/aio.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

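/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * fuse_request_alloc() and fuse_request_free() must stay paired: the
 * free path keys off req->pages to decide whether the page arrays were
 * the inline ones or separately kmalloc'ed, which is why
 * fuse_request_init() never reassigns them.  A minimal user, assuming
 * the declarations from fuse_i.h:
 */
#if 0	/* example only */
static int example_request_alloc(void)
{
	/* one page fits in the inline arrays, so no extra kmalloc */
	struct fuse_req *req = fuse_request_alloc(1);

	if (!req)
		return -ENOMEM;
	/* ... fill in req->in.h and the argument vectors here ... */
	fuse_request_free(req);
	return 0;
}
#endif
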
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		sigset_t oldset;
		int intr;

		block_sigs(&oldset);
		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background));
		restore_sigs(&oldset);
		err = -EINTR;
		if (intr)
			goto out;
	}

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	req->waiting = 1;
	req->background = for_background;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

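/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * fuse_get_req() returns an ERR_PTR()-encoded error (-EINTR, -ENOTCONN
 * or -ENOMEM), never NULL, so callers must check with IS_ERR():
 */
#if 0	/* example only */
	struct fuse_req *req = fuse_get_req(fc, 0);

	if (IS_ERR(req))
		return PTR_ERR(req);
#endif
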
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	req->background = 0;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (unlikely(req->background)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->forget_list_tail->next = forget;
		fc->forget_list_tail = forget;
		wake_up(&fc->waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fc->lock);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, and the reference to the
 * request is released.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		req->background = 0;

		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(req->background);
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

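/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * A typical synchronous caller (loosely modelled on fuse_statfs() in
 * inode.c; the surrounding names are only for illustration) fills in
 * the request, sends it, and picks the result out of req->out.h.error
 * once fuse_request_send() returns:
 */
#if 0	/* example only */
	struct fuse_statfs_out outarg;
	struct fuse_req *req = fuse_get_req(fc, 0);
	int err;

	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&outarg, 0, sizeof(outarg));
	req->in.h.opcode = FUSE_STATFS;
	req->in.h.nodeid = get_node_id(inode);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);	/* blocks until the daemon replies */
	err = req->out.h.error;
	fuse_put_request(fc, req);
#endif
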
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	BUG_ON(!req->background);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;

	req->isreply = 0;
	req->in.h.unique = unique;
	spin_lock(&fc->lock);
	if (fc->connected) {
		queue_request(fc, req);
		err = 0;
	}
	spin_unlock(&fc->lock);

	return err;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->isreply = 0;
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

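/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The 'write' flag is from the point of view of the userspace buffer:
 * a device read (kernel to daemon) copies *into* userspace and sets
 * write=1, while a device write (daemon to kernel) sets write=0; this
 * matches fuse_dev_read() and fuse_dev_write() below:
 */
#if 0	/* example only */
	struct fuse_copy_state cs;

	fuse_copy_init(&cs, fc, 1, iov, nr_segs);	/* daemon reading a request */
	fuse_copy_init(&cs, fc, 0, iov, nr_segs);	/* daemon writing a reply */
#endif
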
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (!cs->write) {
			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
		} else {
			kunmap(buf->page);
			buf->len = PAGE_SIZE - cs->len;
		}
		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		kunmap(cs->pg);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
			cs->len = buf->len;
			cs->buf = cs->mapaddr + buf->offset;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			struct page *page;

			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->mapaddr = kmap(page);
			cs->buf = cs->mapaddr;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap(cs->pg);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++)  {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_conn *fc)
{
	return fc->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
		forget_pending(fc);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fc->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fc->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fc->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fc->forget_list_head.next == NULL)
		fc->forget_list_tail = &fc->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

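/*
 * Editor's note -- worked example, not part of the original file.
 * dequeue_forget() advances 'newhead' (a pointer to a next-pointer)
 * over at most 'max' links and splits the singly linked list there.
 * With max = 2 and a queued list A -> B -> C:
 *
 *	before:  forget_list_head.next -> A -> B -> C -> NULL
 *	after:   returns A -> B -> NULL; the list keeps C -> NULL
 *
 * If the remaining list becomes empty, the tail pointer is reset to
 * &fc->forget_list_head so that fuse_queue_forget() keeps appending
 * correctly.
 */
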
static int fuse_read_single_forget(struct fuse_conn *fc,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fc->lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fc->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_conn *fc,
				   struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fc->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fc, max_forgets, &count);
	spin_unlock(&fc->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fc->lock)
{
	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fc, cs, nbytes);
	else
		return fuse_read_batch_forget(fc, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	if (forget_pending(fc)) {
		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
			return fuse_read_forget(fc, cs, nbytes);

		if (fc->forget_batch <= -8)
			fc->forget_batch = 16;
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 1, iov, nr_segs);

	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}

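/*
 * Editor's note -- illustrative sketch of the userspace side, not part
 * of the original file.  Each read() on /dev/fuse returns exactly one
 * request, and the buffer has to be large enough for the biggest
 * possible one (otherwise fuse_dev_do_read() above fails the request
 * with -EIO and restarts):
 */
#if 0	/* example only, userspace code */
	char buf[FUSE_MIN_READ_BUFFER];		/* minimum from <linux/fuse.h> */
	ssize_t n = read(fuse_fd, buf, sizeof(buf));

	if (n >= (ssize_t) sizeof(struct fuse_in_header)) {
		struct fuse_in_header *in = (struct fuse_in_header *) buf;
		/* dispatch on in->opcode; reply quoting in->unique */
	}
#endif
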
static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = fuse_dev_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, fc, 1, NULL, 0);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		buf->ops = &fuse_dev_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->files)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

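/*
 * Editor's note -- illustrative sketch of the userspace side, not part
 * of the original file.  A daemon can shuttle requests and replies
 * through a pipe instead of copying them with read()/write();
 * SPLICE_F_MOVE on the reply path lets fuse_dev_splice_write() below
 * steal pages (cs.move_pages) instead of copying them:
 */
#if 0	/* example only, userspace code */
	int pfd[2];

	pipe(pfd);
	/* pull one request into the pipe */
	splice(fuse_fd, NULL, pfd[1], NULL, bufsize, 0);
	/* ... build the reply in the pipe ... */
	splice(pfd[0], NULL, fuse_fd, NULL, reply_len, SPLICE_F_MOVE);
#endif
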
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_CACHE_SHIFT;
	offset = outarg.offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 && (num != 0 || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, 0);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_CACHE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

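/*
 * Editor's note -- illustrative sketch of the userspace side, not part
 * of the original file.  As fuse_dev_do_write() below documents, a
 * notification is a "reply" with unique == 0 whose error field carries
 * the notification code.  Invalidating an inode's cached range could
 * look roughly like this:
 */
#if 0	/* example only, userspace code */
	struct fuse_notify_inval_inode_out inval = {
		.ino = nodeid,
		.off = 0,
		.len = -1,			/* whole file */
	};
	struct fuse_out_header oh = {
		.unique = 0,			/* notification, not a reply */
		.error  = FUSE_NOTIFY_INVAL_INODE,
		.len    = sizeof(oh) + sizeof(inval),
	};
	struct iovec iov[2] = {
		{ &oh, sizeof(oh) },
		{ &inval, sizeof(inval) },
	};

	writev(fuse_fd, iov, 2);
#endif
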
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, iov, nr_segs);

	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}

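/*
 * Editor's note -- illustrative sketch of the userspace side, not part
 * of the original file.  A reply echoes the request's unique id and
 * sets oh.len to the size of the whole message (header plus payload),
 * typically sent in one writev():
 */
#if 0	/* example only, userspace code */
	struct fuse_out_header oh = {
		.unique = in->unique,		/* from the request header */
		.error  = 0,
		.len    = sizeof(oh) + payload_len,
	};
	struct iovec iov[2] = {
		{ &oh, sizeof(oh) },
		{ payload, payload_len },
	};

	writev(fuse_fd, iov, 2);
#endif
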
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, fc, 0, NULL, nbuf);
	cs.pipebufs = bufs;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

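/*
 * Editor's note -- illustrative sketch of the userspace side, not part
 * of the original file.  With /dev/fuse opened O_NONBLOCK a daemon can
 * multiplex it with other descriptors:
 */
#if 0	/* example only, userspace code */
	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		/* a read() will now return a request without blocking */
	}
#endif
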
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

static void end_queued_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	fc->max_background = UINT_MAX;
	flush_bg_queue(fc);
	end_requests(fc, &fc->pending);
	end_requests(fc, &fc->processing);
	while (forget_pending(fc))
		kfree(dequeue_forget(fc, 1, NULL));
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		fc->initialized = 1;
		end_io_requests(fc);
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

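/*
 * Editor's note -- not part of the original file.  fuse_abort_conn()
 * is also reachable from userspace through the fusectl filesystem (see
 * fuse_conn_abort_write() in control.c and
 * Documentation/filesystems/fuse.txt), roughly:
 *
 *	echo 1 > /sys/fs/fuse/connections/<dev>/abort
 *
 * where <dev> is the connection's device id.
 */
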
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		fc->blocked = 0;
		fc->initialized = 1;
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}