xref: /openbmc/linux/fs/fuse/dev.c (revision 7909b1c6)
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}
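
/*
 * Background sketch (an assumption about typical usage, not code in
 * this file): userspace obtains this file by opening /dev/fuse and
 * passing the descriptor to mount(2) through the "fd=" option, which
 * is how file->private_data comes to point at the connection.
 * Roughly, with hypothetical values:
 *
 *	int fd = open("/dev/fuse", O_RDWR);
 *	char opts[64];
 *	snprintf(opts, sizeof(opts),
 *		 "fd=%d,rootmode=40000,user_id=0,group_id=0", fd);
 *	mount("myfs", "/mnt", "fuse", 0, opts);
 */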

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
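
/*
 * Typical caller pattern for the request API above, as used elsewhere
 * in fuse (a sketch, not a verbatim quote of any one caller):
 *
 *	req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... fill in req->in ...
 *	fuse_request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */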

/*
 * Return the request in fuse_file->reserved_req.  However, that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	fuse_request_send_nowait(fc, req);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while it was locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (!cs->write) {
			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
		} else {
			kunmap(buf->page);
			buf->len = PAGE_SIZE - cs->len;
		}
		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		kunmap(cs->pg);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pageful of userspace buffer, map it into kernel
 * address space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
			cs->len = buf->len;
			cs->buf = cs->mapaddr + buf->offset;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			struct page *page;

			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->mapaddr = kmap(page);
			cs->buf = cs->mapaddr;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap(cs->pg);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;
	struct address_space *mapping;
	pgoff_t index;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	mapping = oldpage->mapping;
	index = oldpage->index;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	remove_from_page_cache(oldpage);
	page_cache_release(oldpage);

	err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
	if (err) {
		printk(KERN_WARNING "fuse_try_move_page: failed to add page");
		goto out_fallback_unlock;
	}
	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(&fc->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}
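
/*
 * Protocol note: the interrupt message carries its own unique ID
 * (req->intr_unique) while arg.unique names the original request.  A
 * daemon that has not yet seen the original request may answer the
 * interrupt with -EAGAIN to have it requeued; answering -ENOSYS
 * disables interrupts for the whole connection (see the interrupt
 * reply handling in fuse_dev_do_write() below).
 */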

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET), the request has been
 * aborted, or an error occurred during the copying, then it is
 * finished by calling request_end().  Otherwise add it to the
 * processing list and set the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 1, iov, nr_segs);

	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}
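
/*
 * What the daemon sees on read(2) is a struct fuse_in_header followed
 * by the opcode-specific arguments.  A minimal receive-side sketch,
 * assuming <linux/fuse.h> and a daemon-defined handle_request()
 * helper (hypothetical name):
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *	ssize_t n = read(fuse_fd, buf, sizeof(buf));
 *	if (n >= (ssize_t) sizeof(struct fuse_in_header)) {
 *		struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *		handle_request(in->opcode, in->unique, in + 1,
 *			       n - sizeof(*in));
 *	}
 */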

static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = fuse_dev_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, fc, 1, NULL, 0);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		buf->ops = &fuse_dev_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->inode)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}
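
/*
 * Usage sketch for the splice path above: a daemon that wants
 * zero-copy delivery of requests splices from the device into a pipe
 * (hypothetical variable names):
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(fuse_fd, NULL, pfd[1], NULL, bufsize,
 *			   SPLICE_F_MOVE);
 *	... consume n bytes from pfd[0] ...
 */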

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}
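
/*
 * A daemon sends a notification by writing a struct fuse_out_header
 * with unique == 0 and the notification code in the error field,
 * followed by the code-specific payload.  Sketch for
 * FUSE_NOTIFY_INVAL_INODE (hypothetical values; len <= 0 is assumed
 * to mean "to the end of the file"):
 *
 *	struct fuse_out_header oh;
 *	struct fuse_notify_inval_inode_out arg;
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) }, { &arg, sizeof(arg) },
 *	};
 *	oh.len = sizeof(oh) + sizeof(arg);
 *	oh.error = FUSE_NOTIFY_INVAL_INODE;
 *	oh.unique = 0;
 *	arg.ino = ino;
 *	arg.off = 0;
 *	arg.len = -1;
 *	writev(fuse_fd, iov, 2);
 */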

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
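
/*
 * Worked example: for a 4096-byte READ the expected reply size is
 * reqsize = sizeof(struct fuse_out_header) + 4096.  A short read from
 * the daemon, say nbytes = sizeof(struct fuse_out_header) + 100, is
 * accepted only because READ sets out->argvar, which lets the last
 * argument shrink by diffsize = 3996 here.
 */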

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then looked up on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, iov, nr_segs);

	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}
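
/*
 * Reply sketch from the daemon side, e.g. answering a GETATTR
 * (hypothetical values; oh.unique must be copied from the request's
 * fuse_in_header):
 *
 *	struct fuse_out_header oh;
 *	struct fuse_attr_out arg;
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) }, { &arg, sizeof(arg) },
 *	};
 *	oh.len = sizeof(oh) + sizeof(arg);
 *	oh.error = 0;
 *	oh.unique = request_unique;
 *	... fill in arg ...
 *	writev(fuse_fd, iov, 2);
 */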

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, fc, 0, NULL, nbuf);
	cs.pipebufs = bufs;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}
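
/*
 * A daemon can therefore multiplex on the device with poll(2):
 * POLLIN signals a pending request, POLLERR a dead connection, and
 * POLLOUT is always set since writes never block.  E.g.:
 *
 *	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		... read the next request ...
 */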

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
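
/*
 * The abort is normally triggered from userspace through the sysfs
 * interface described in Documentation/filesystems/fuse.txt:
 *
 *	echo 1 > /sys/fs/fuse/connections/<minor>/abort
 */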

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}