xref: /openbmc/linux/fs/fuse/dev.c (revision 14d46d7abc3973a47e8eb0eb5eb87ee8d910a505)
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4 
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8 
9 #include "fuse_i.h"
10 
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/sched/signal.h>
15 #include <linux/uio.h>
16 #include <linux/miscdevice.h>
17 #include <linux/pagemap.h>
18 #include <linux/file.h>
19 #include <linux/slab.h>
20 #include <linux/pipe_fs_i.h>
21 #include <linux/swap.h>
22 #include <linux/splice.h>
23 #include <linux/sched.h>
24 
25 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
26 MODULE_ALIAS("devname:fuse");
27 
28 /* Ordinary requests have even IDs, while interrupt IDs are odd */
29 #define FUSE_INT_REQ_BIT (1ULL << 0)
30 #define FUSE_REQ_ID_STEP (1ULL << 1)
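
/*
 * Illustrative sketch (editor's note, not compiled): fuse_get_unique()
 * below hands out request IDs 2, 4, 6, ... and fuse_read_interrupt()
 * derives the matching interrupt ID by setting the low bit:
 *
 *	u64 unique = 6;				// an ordinary request ID
 *	u64 intr   = unique | FUSE_INT_REQ_BIT;	// 7: its interrupt ID
 *
 * Replies are mapped back with (oh.unique & ~FUSE_INT_REQ_BIT) in
 * fuse_dev_do_write().
 */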
31 
32 static struct kmem_cache *fuse_req_cachep;
33 
34 static struct fuse_dev *fuse_get_dev(struct file *file)
35 {
36 	/*
37 	 * Lockless access is OK, because file->private_data is set
38 	 * once during mount and is valid until the file is released.
39 	 */
40 	return READ_ONCE(file->private_data);
41 }
42 
43 static void fuse_request_init(struct fuse_req *req)
44 {
45 	INIT_LIST_HEAD(&req->list);
46 	INIT_LIST_HEAD(&req->intr_entry);
47 	init_waitqueue_head(&req->waitq);
48 	refcount_set(&req->count, 1);
49 	__set_bit(FR_PENDING, &req->flags);
50 }
51 
52 static struct fuse_req *fuse_request_alloc(gfp_t flags)
53 {
54 	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
55 	if (req)
56 		fuse_request_init(req);
57 
58 	return req;
59 }
60 
61 static void fuse_request_free(struct fuse_req *req)
62 {
63 	kmem_cache_free(fuse_req_cachep, req);
64 }
65 
66 static void __fuse_get_request(struct fuse_req *req)
67 {
68 	refcount_inc(&req->count);
69 }
70 
71 /* Must be called with > 1 refcount */
72 static void __fuse_put_request(struct fuse_req *req)
73 {
74 	refcount_dec(&req->count);
75 }
76 
77 void fuse_set_initialized(struct fuse_conn *fc)
78 {
79 	/* Make sure stores before this are seen on another CPU */
80 	smp_wmb();
81 	fc->initialized = 1;
82 }
83 
84 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
85 {
86 	return !fc->initialized || (for_background && fc->blocked);
87 }
88 
89 static void fuse_drop_waiting(struct fuse_conn *fc)
90 {
91 	/*
92 	 * lockless check of fc->connected is okay, because atomic_dec_and_test()
93 	 * provides a memory barrier matched with the one in fuse_wait_aborted()
94 	 * to ensure no wake-up is missed.
95 	 */
96 	if (atomic_dec_and_test(&fc->num_waiting) &&
97 	    !READ_ONCE(fc->connected)) {
98 		/* wake up aborters */
99 		wake_up_all(&fc->blocked_waitq);
100 	}
101 }
102 
103 static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
104 
105 static struct fuse_req *fuse_get_req(struct fuse_conn *fc, bool for_background)
106 {
107 	struct fuse_req *req;
108 	int err;
109 	atomic_inc(&fc->num_waiting);
110 
111 	if (fuse_block_alloc(fc, for_background)) {
112 		err = -EINTR;
113 		if (wait_event_killable_exclusive(fc->blocked_waitq,
114 				!fuse_block_alloc(fc, for_background)))
115 			goto out;
116 	}
117 	/* Matches smp_wmb() in fuse_set_initialized() */
118 	smp_rmb();
119 
120 	err = -ENOTCONN;
121 	if (!fc->connected)
122 		goto out;
123 
124 	err = -ECONNREFUSED;
125 	if (fc->conn_error)
126 		goto out;
127 
128 	req = fuse_request_alloc(GFP_KERNEL);
129 	err = -ENOMEM;
130 	if (!req) {
131 		if (for_background)
132 			wake_up(&fc->blocked_waitq);
133 		goto out;
134 	}
135 
136 	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
137 	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
138 	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
139 
140 	__set_bit(FR_WAITING, &req->flags);
141 	if (for_background)
142 		__set_bit(FR_BACKGROUND, &req->flags);
143 
144 	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
145 		     req->in.h.gid == ((gid_t)-1))) {
146 		fuse_put_request(fc, req);
147 		return ERR_PTR(-EOVERFLOW);
148 	}
149 	return req;
150 
151  out:
152 	fuse_drop_waiting(fc);
153 	return ERR_PTR(err);
154 }
155 
156 static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
157 {
158 	if (refcount_dec_and_test(&req->count)) {
159 		if (test_bit(FR_BACKGROUND, &req->flags)) {
160 			/*
161 			 * We get here in the unlikely case that a background
162 			 * request was allocated but not sent
163 			 */
164 			spin_lock(&fc->bg_lock);
165 			if (!fc->blocked)
166 				wake_up(&fc->blocked_waitq);
167 			spin_unlock(&fc->bg_lock);
168 		}
169 
170 		if (test_bit(FR_WAITING, &req->flags)) {
171 			__clear_bit(FR_WAITING, &req->flags);
172 			fuse_drop_waiting(fc);
173 		}
174 
175 		fuse_request_free(req);
176 	}
177 }
178 EXPORT_SYMBOL_GPL(fuse_put_request);
179 
180 unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
181 {
182 	unsigned nbytes = 0;
183 	unsigned i;
184 
185 	for (i = 0; i < numargs; i++)
186 		nbytes += args[i].size;
187 
188 	return nbytes;
189 }
190 EXPORT_SYMBOL_GPL(fuse_len_args);
191 
192 static u64 fuse_get_unique(struct fuse_iqueue *fiq)
193 {
194 	fiq->reqctr += FUSE_REQ_ID_STEP;
195 	return fiq->reqctr;
196 }
197 
198 static unsigned int fuse_req_hash(u64 unique)
199 {
200 	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
201 }
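
/*
 * Example (editor's note): fuse_dev_do_write() clears FUSE_INT_REQ_BIT
 * before calling request_find(), and fuse_req_hash() masks it here as
 * well, so request 6 and a reply addressed to interrupt ID 7 resolve to
 * the same fpq->processing bucket.
 */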
202 
203 static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
204 {
205 	req->in.h.len = sizeof(struct fuse_in_header) +
206 		fuse_len_args(req->args->in_numargs,
207 			      (struct fuse_arg *) req->args->in_args);
208 	list_add_tail(&req->list, &fiq->pending);
209 	wake_up(&fiq->waitq);
210 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
211 }
212 
213 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
214 		       u64 nodeid, u64 nlookup)
215 {
216 	struct fuse_iqueue *fiq = &fc->iq;
217 
218 	forget->forget_one.nodeid = nodeid;
219 	forget->forget_one.nlookup = nlookup;
220 
221 	spin_lock(&fiq->lock);
222 	if (fiq->connected) {
223 		fiq->forget_list_tail->next = forget;
224 		fiq->forget_list_tail = forget;
225 		wake_up(&fiq->waitq);
226 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
227 	} else {
228 		kfree(forget);
229 	}
230 	spin_unlock(&fiq->lock);
231 }
232 
233 static void flush_bg_queue(struct fuse_conn *fc)
234 {
235 	struct fuse_iqueue *fiq = &fc->iq;
236 
237 	while (fc->active_background < fc->max_background &&
238 	       !list_empty(&fc->bg_queue)) {
239 		struct fuse_req *req;
240 
241 		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
242 		list_del(&req->list);
243 		fc->active_background++;
244 		spin_lock(&fiq->lock);
245 		req->in.h.unique = fuse_get_unique(fiq);
246 		queue_request(fiq, req);
247 		spin_unlock(&fiq->lock);
248 	}
249 }
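
/*
 * Sketch of the throttling above (editor's note), assuming the default
 * limit from fuse_i.h (FUSE_DEFAULT_MAX_BACKGROUND == 12): the first 12
 * queued background requests move straight to fiq->pending; the 13th
 * parks on fc->bg_queue until fuse_request_end() decrements
 * active_background and calls flush_bg_queue() again.
 */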
250 
251 /*
252  * This function is called when a request is finished.  Either a reply
253  * has arrived, or the request was aborted (and not yet sent), or some
254  * error occurred during communication with userspace, or the device
255  * file was closed.  The requester thread is woken up (if still
256  * waiting), the 'end' callback is called if given, and the reference
257  * to the request is released.
258  */
259 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
260 {
261 	struct fuse_iqueue *fiq = &fc->iq;
262 	bool async = req->args->end;
263 
264 	if (test_and_set_bit(FR_FINISHED, &req->flags))
265 		goto put_request;
266 	/*
267 	 * test_and_set_bit() implies smp_mb() between bit
268 	 * changing and below intr_entry check. Pairs with
269 	 * smp_mb() from queue_interrupt().
270 	 */
271 	if (!list_empty(&req->intr_entry)) {
272 		spin_lock(&fiq->lock);
273 		list_del_init(&req->intr_entry);
274 		spin_unlock(&fiq->lock);
275 	}
276 	WARN_ON(test_bit(FR_PENDING, &req->flags));
277 	WARN_ON(test_bit(FR_SENT, &req->flags));
278 	if (test_bit(FR_BACKGROUND, &req->flags)) {
279 		spin_lock(&fc->bg_lock);
280 		clear_bit(FR_BACKGROUND, &req->flags);
281 		if (fc->num_background == fc->max_background) {
282 			fc->blocked = 0;
283 			wake_up(&fc->blocked_waitq);
284 		} else if (!fc->blocked) {
285 			/*
286 			 * Wake up next waiter, if any.  It's okay to use
287 			 * waitqueue_active(), as we've already synced up
288 			 * fc->blocked with waiters with the wake_up() call
289 			 * above.
290 			 */
291 			if (waitqueue_active(&fc->blocked_waitq))
292 				wake_up(&fc->blocked_waitq);
293 		}
294 
295 		if (fc->num_background == fc->congestion_threshold && fc->sb) {
296 			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
297 			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
298 		}
299 		fc->num_background--;
300 		fc->active_background--;
301 		flush_bg_queue(fc);
302 		spin_unlock(&fc->bg_lock);
303 	} else {
304 		/* Wake up waiter sleeping in request_wait_answer() */
305 		wake_up(&req->waitq);
306 	}
307 
308 	if (async)
309 		req->args->end(fc, req->args, req->out.h.error);
310 put_request:
311 	fuse_put_request(fc, req);
312 }
313 EXPORT_SYMBOL_GPL(fuse_request_end);
314 
315 static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
316 {
317 	spin_lock(&fiq->lock);
318 	/* Bail out unless request_wait_answer() has marked this req interrupted */
319 	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
320 		spin_unlock(&fiq->lock);
321 		return -EINVAL;
322 	}
323 
324 	if (list_empty(&req->intr_entry)) {
325 		list_add_tail(&req->intr_entry, &fiq->interrupts);
326 		/*
327 		 * Pairs with smp_mb() implied by test_and_set_bit()
328 		 * from fuse_request_end().
329 		 */
330 		smp_mb();
331 		if (test_bit(FR_FINISHED, &req->flags)) {
332 			list_del_init(&req->intr_entry);
333 			spin_unlock(&fiq->lock);
334 			return 0;
335 		}
336 		wake_up(&fiq->waitq);
337 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
338 	}
339 	spin_unlock(&fiq->lock);
340 	return 0;
341 }
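
/*
 * Userspace view of the above, as a hedged sketch: the daemon reads a
 * FUSE_INTERRUPT request whose header ID is (unique | FUSE_INT_REQ_BIT)
 * and whose body names the victim.  It normally answers the original
 * request with -EINTR, but it may also reply to the interrupt ID itself:
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh),
 *		.error  = -EAGAIN,	// "not seen yet, please re-queue"
 *		.unique = intr_unique,	// unique | FUSE_INT_REQ_BIT
 *	};
 *	write(devfd, &oh, sizeof(oh));
 *
 * Replying -ENOSYS instead sets fc->no_interrupt; both paths are
 * handled in fuse_dev_do_write() below.
 */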
342 
343 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
344 {
345 	struct fuse_iqueue *fiq = &fc->iq;
346 	int err;
347 
348 	if (!fc->no_interrupt) {
349 		/* Any signal may interrupt this */
350 		err = wait_event_interruptible(req->waitq,
351 					test_bit(FR_FINISHED, &req->flags));
352 		if (!err)
353 			return;
354 
355 		set_bit(FR_INTERRUPTED, &req->flags);
356 		/* matches barrier in fuse_dev_do_read() */
357 		smp_mb__after_atomic();
358 		if (test_bit(FR_SENT, &req->flags))
359 			queue_interrupt(fiq, req);
360 	}
361 
362 	if (!test_bit(FR_FORCE, &req->flags)) {
363 		/* Only fatal signals may interrupt this */
364 		err = wait_event_killable(req->waitq,
365 					test_bit(FR_FINISHED, &req->flags));
366 		if (!err)
367 			return;
368 
369 		spin_lock(&fiq->lock);
370 		/* Request is not yet in userspace, bail out */
371 		if (test_bit(FR_PENDING, &req->flags)) {
372 			list_del(&req->list);
373 			spin_unlock(&fiq->lock);
374 			__fuse_put_request(req);
375 			req->out.h.error = -EINTR;
376 			return;
377 		}
378 		spin_unlock(&fiq->lock);
379 	}
380 
381 	/*
382 	 * Either request is already in userspace, or it was forced.
383 	 * Wait it out.
384 	 */
385 	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
386 }
387 
388 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
389 {
390 	struct fuse_iqueue *fiq = &fc->iq;
391 
392 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
393 	spin_lock(&fiq->lock);
394 	if (!fiq->connected) {
395 		spin_unlock(&fiq->lock);
396 		req->out.h.error = -ENOTCONN;
397 	} else {
398 		req->in.h.unique = fuse_get_unique(fiq);
399 		queue_request(fiq, req);
400 		/* acquire extra reference, since request is still needed
401 		   after fuse_request_end() */
402 		__fuse_get_request(req);
403 		spin_unlock(&fiq->lock);
404 
405 		request_wait_answer(fc, req);
406 		/* Pairs with smp_wmb() in fuse_request_end() */
407 		smp_rmb();
408 	}
409 }
410 
411 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
412 {
413 	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
414 		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;
415 
416 	if (fc->minor < 9) {
417 		switch (args->opcode) {
418 		case FUSE_LOOKUP:
419 		case FUSE_CREATE:
420 		case FUSE_MKNOD:
421 		case FUSE_MKDIR:
422 		case FUSE_SYMLINK:
423 		case FUSE_LINK:
424 			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
425 			break;
426 		case FUSE_GETATTR:
427 		case FUSE_SETATTR:
428 			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
429 			break;
430 		}
431 	}
432 	if (fc->minor < 12) {
433 		switch (args->opcode) {
434 		case FUSE_CREATE:
435 			args->in_args[0].size = sizeof(struct fuse_open_in);
436 			break;
437 		case FUSE_MKNOD:
438 			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
439 			break;
440 		}
441 	}
442 }
443 
444 static void fuse_force_creds(struct fuse_conn *fc, struct fuse_req *req)
445 {
446 	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
447 	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
448 	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
449 }
450 
451 static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
452 {
453 	req->in.h.opcode = args->opcode;
454 	req->in.h.nodeid = args->nodeid;
455 	req->args = args;
456 }
457 
458 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
459 {
460 	struct fuse_req *req;
461 	ssize_t ret;
462 
463 	if (args->force) {
464 		atomic_inc(&fc->num_waiting);
465 		req = fuse_request_alloc(GFP_KERNEL | __GFP_NOFAIL);
466 
467 		if (!args->nocreds)
468 			fuse_force_creds(fc, req);
469 
470 		__set_bit(FR_WAITING, &req->flags);
471 		__set_bit(FR_FORCE, &req->flags);
472 	} else {
473 		WARN_ON(args->nocreds);
474 		req = fuse_get_req(fc, false);
475 		if (IS_ERR(req))
476 			return PTR_ERR(req);
477 	}
478 
479 	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
480 	fuse_adjust_compat(fc, args);
481 	fuse_args_to_req(req, args);
482 
483 	if (!args->noreply)
484 		__set_bit(FR_ISREPLY, &req->flags);
485 	__fuse_request_send(fc, req);
486 	ret = req->out.h.error;
487 	if (!ret && args->out_argvar) {
488 		BUG_ON(args->out_numargs == 0);
489 		ret = args->out_args[args->out_numargs - 1].size;
490 	}
491 	fuse_put_request(fc, req);
492 
493 	return ret;
494 }
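
/*
 * Typical caller sketch (editor's note, closely mirroring
 * fuse_do_getattr() in dir.c; shown only for illustration):
 *
 *	FUSE_ARGS(args);
 *	struct fuse_getattr_in inarg = {};
 *	struct fuse_attr_out outarg;
 *
 *	args.opcode = FUSE_GETATTR;
 *	args.nodeid = get_node_id(inode);
 *	args.in_numargs = 1;
 *	args.in_args[0].size = sizeof(inarg);
 *	args.in_args[0].value = &inarg;
 *	args.out_numargs = 1;
 *	args.out_args[0].size = sizeof(outarg);
 *	args.out_args[0].value = &outarg;
 *	int err = fuse_simple_request(fc, &args);
 */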
495 
496 static bool fuse_request_queue_background(struct fuse_conn *fc,
497 					  struct fuse_req *req)
498 {
499 	bool queued = false;
500 
501 	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
502 	if (!test_bit(FR_WAITING, &req->flags)) {
503 		__set_bit(FR_WAITING, &req->flags);
504 		atomic_inc(&fc->num_waiting);
505 	}
506 	__set_bit(FR_ISREPLY, &req->flags);
507 	spin_lock(&fc->bg_lock);
508 	if (likely(fc->connected)) {
509 		fc->num_background++;
510 		if (fc->num_background == fc->max_background)
511 			fc->blocked = 1;
512 		if (fc->num_background == fc->congestion_threshold && fc->sb) {
513 			set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
514 			set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
515 		}
516 		list_add_tail(&req->list, &fc->bg_queue);
517 		flush_bg_queue(fc);
518 		queued = true;
519 	}
520 	spin_unlock(&fc->bg_lock);
521 
522 	return queued;
523 }
524 
525 int fuse_simple_background(struct fuse_conn *fc, struct fuse_args *args,
526 			    gfp_t gfp_flags)
527 {
528 	struct fuse_req *req;
529 
530 	if (args->force) {
531 		WARN_ON(!args->nocreds);
532 		req = fuse_request_alloc(gfp_flags);
533 		if (!req)
534 			return -ENOMEM;
535 		__set_bit(FR_BACKGROUND, &req->flags);
536 	} else {
537 		WARN_ON(args->nocreds);
538 		req = fuse_get_req(fc, true);
539 		if (IS_ERR(req))
540 			return PTR_ERR(req);
541 	}
542 
543 	fuse_args_to_req(req, args);
544 
545 	if (!fuse_request_queue_background(fc, req)) {
546 		fuse_put_request(fc, req);
547 		return -ENOTCONN;
548 	}
549 
550 	return 0;
551 }
552 EXPORT_SYMBOL_GPL(fuse_simple_background);
553 
554 static int fuse_simple_notify_reply(struct fuse_conn *fc,
555 				    struct fuse_args *args, u64 unique)
556 {
557 	struct fuse_req *req;
558 	struct fuse_iqueue *fiq = &fc->iq;
559 	int err = 0;
560 
561 	req = fuse_get_req(fc, false);
562 	if (IS_ERR(req))
563 		return PTR_ERR(req);
564 
565 	__clear_bit(FR_ISREPLY, &req->flags);
566 	req->in.h.unique = unique;
567 
568 	fuse_args_to_req(req, args);
569 
570 	spin_lock(&fiq->lock);
571 	if (fiq->connected) {
572 		queue_request(fiq, req);
573 		spin_unlock(&fiq->lock);
574 	} else {
575 		err = -ENODEV;
576 		spin_unlock(&fiq->lock);
577 		fuse_put_request(fc, req);
578 	}
580 
581 	return err;
582 }
583 
584 /*
585  * Lock the request.  Up to the next unlock_request() there mustn't be
586  * anything that could cause a page-fault.  If the request was already
587  * aborted, bail out.
588  */
589 static int lock_request(struct fuse_req *req)
590 {
591 	int err = 0;
592 	if (req) {
593 		spin_lock(&req->waitq.lock);
594 		if (test_bit(FR_ABORTED, &req->flags))
595 			err = -ENOENT;
596 		else
597 			set_bit(FR_LOCKED, &req->flags);
598 		spin_unlock(&req->waitq.lock);
599 	}
600 	return err;
601 }
602 
603 /*
604  * Unlock request.  If it was aborted while locked, caller is responsible
605  * for unlocking and ending the request.
606  */
607 static int unlock_request(struct fuse_req *req)
608 {
609 	int err = 0;
610 	if (req) {
611 		spin_lock(&req->waitq.lock);
612 		if (test_bit(FR_ABORTED, &req->flags))
613 			err = -ENOENT;
614 		else
615 			clear_bit(FR_LOCKED, &req->flags);
616 		spin_unlock(&req->waitq.lock);
617 	}
618 	return err;
619 }
620 
621 struct fuse_copy_state {
622 	int write;
623 	struct fuse_req *req;
624 	struct iov_iter *iter;
625 	struct pipe_buffer *pipebufs;
626 	struct pipe_buffer *currbuf;
627 	struct pipe_inode_info *pipe;
628 	unsigned long nr_segs;
629 	struct page *pg;
630 	unsigned len;
631 	unsigned offset;
632 	unsigned move_pages:1;
633 };
634 
635 static void fuse_copy_init(struct fuse_copy_state *cs, int write,
636 			   struct iov_iter *iter)
637 {
638 	memset(cs, 0, sizeof(*cs));
639 	cs->write = write;
640 	cs->iter = iter;
641 }
642 
643 /* Unmap and put previous page of userspace buffer */
644 static void fuse_copy_finish(struct fuse_copy_state *cs)
645 {
646 	if (cs->currbuf) {
647 		struct pipe_buffer *buf = cs->currbuf;
648 
649 		if (cs->write)
650 			buf->len = PAGE_SIZE - cs->len;
651 		cs->currbuf = NULL;
652 	} else if (cs->pg) {
653 		if (cs->write) {
654 			flush_dcache_page(cs->pg);
655 			set_page_dirty_lock(cs->pg);
656 		}
657 		put_page(cs->pg);
658 	}
659 	cs->pg = NULL;
660 }
661 
662 /*
663  * Get another pageful of userspace buffer, map it to kernel
664  * address space, and lock the request
665  */
666 static int fuse_copy_fill(struct fuse_copy_state *cs)
667 {
668 	struct page *page;
669 	int err;
670 
671 	err = unlock_request(cs->req);
672 	if (err)
673 		return err;
674 
675 	fuse_copy_finish(cs);
676 	if (cs->pipebufs) {
677 		struct pipe_buffer *buf = cs->pipebufs;
678 
679 		if (!cs->write) {
680 			err = pipe_buf_confirm(cs->pipe, buf);
681 			if (err)
682 				return err;
683 
684 			BUG_ON(!cs->nr_segs);
685 			cs->currbuf = buf;
686 			cs->pg = buf->page;
687 			cs->offset = buf->offset;
688 			cs->len = buf->len;
689 			cs->pipebufs++;
690 			cs->nr_segs--;
691 		} else {
692 			if (cs->nr_segs == cs->pipe->buffers)
693 				return -EIO;
694 
695 			page = alloc_page(GFP_HIGHUSER);
696 			if (!page)
697 				return -ENOMEM;
698 
699 			buf->page = page;
700 			buf->offset = 0;
701 			buf->len = 0;
702 
703 			cs->currbuf = buf;
704 			cs->pg = page;
705 			cs->offset = 0;
706 			cs->len = PAGE_SIZE;
707 			cs->pipebufs++;
708 			cs->nr_segs++;
709 		}
710 	} else {
711 		size_t off;
712 		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
713 		if (err < 0)
714 			return err;
715 		BUG_ON(!err);
716 		cs->len = err;
717 		cs->offset = off;
718 		cs->pg = page;
719 		iov_iter_advance(cs->iter, err);
720 	}
721 
722 	return lock_request(cs->req);
723 }
724 
725 /* Do as much copy to/from userspace buffer as we can */
726 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
727 {
728 	unsigned ncpy = min(*size, cs->len);
729 	if (val) {
730 		void *pgaddr = kmap_atomic(cs->pg);
731 		void *buf = pgaddr + cs->offset;
732 
733 		if (cs->write)
734 			memcpy(buf, *val, ncpy);
735 		else
736 			memcpy(*val, buf, ncpy);
737 
738 		kunmap_atomic(pgaddr);
739 		*val += ncpy;
740 	}
741 	*size -= ncpy;
742 	cs->len -= ncpy;
743 	cs->offset += ncpy;
744 	return ncpy;
745 }
746 
747 static int fuse_check_page(struct page *page)
748 {
749 	if (page_mapcount(page) ||
750 	    page->mapping != NULL ||
751 	    page_count(page) != 1 ||
752 	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
753 	     ~(1 << PG_locked |
754 	       1 << PG_referenced |
755 	       1 << PG_uptodate |
756 	       1 << PG_lru |
757 	       1 << PG_active |
758 	       1 << PG_reclaim))) {
759 		pr_warn("trying to steal weird page\n");
760 		pr_warn("  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
761 		return 1;
762 	}
763 	return 0;
764 }
765 
766 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
767 {
768 	int err;
769 	struct page *oldpage = *pagep;
770 	struct page *newpage;
771 	struct pipe_buffer *buf = cs->pipebufs;
772 
773 	err = unlock_request(cs->req);
774 	if (err)
775 		return err;
776 
777 	fuse_copy_finish(cs);
778 
779 	err = pipe_buf_confirm(cs->pipe, buf);
780 	if (err)
781 		return err;
782 
783 	BUG_ON(!cs->nr_segs);
784 	cs->currbuf = buf;
785 	cs->len = buf->len;
786 	cs->pipebufs++;
787 	cs->nr_segs--;
788 
789 	if (cs->len != PAGE_SIZE)
790 		goto out_fallback;
791 
792 	if (pipe_buf_steal(cs->pipe, buf) != 0)
793 		goto out_fallback;
794 
795 	newpage = buf->page;
796 
797 	if (!PageUptodate(newpage))
798 		SetPageUptodate(newpage);
799 
800 	ClearPageMappedToDisk(newpage);
801 
802 	if (fuse_check_page(newpage) != 0)
803 		goto out_fallback_unlock;
804 
805 	/*
806 	 * This is a new and locked page, it shouldn't be mapped or
807 	 * have any special flags on it
808 	 */
809 	if (WARN_ON(page_mapped(oldpage)))
810 		goto out_fallback_unlock;
811 	if (WARN_ON(page_has_private(oldpage)))
812 		goto out_fallback_unlock;
813 	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
814 		goto out_fallback_unlock;
815 	if (WARN_ON(PageMlocked(oldpage)))
816 		goto out_fallback_unlock;
817 
818 	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
819 	if (err) {
820 		unlock_page(newpage);
821 		return err;
822 	}
823 
824 	get_page(newpage);
825 
826 	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
827 		lru_cache_add_file(newpage);
828 
829 	err = 0;
830 	spin_lock(&cs->req->waitq.lock);
831 	if (test_bit(FR_ABORTED, &cs->req->flags))
832 		err = -ENOENT;
833 	else
834 		*pagep = newpage;
835 	spin_unlock(&cs->req->waitq.lock);
836 
837 	if (err) {
838 		unlock_page(newpage);
839 		put_page(newpage);
840 		return err;
841 	}
842 
843 	unlock_page(oldpage);
844 	put_page(oldpage);
845 	cs->len = 0;
846 
847 	return 0;
848 
849 out_fallback_unlock:
850 	unlock_page(newpage);
851 out_fallback:
852 	cs->pg = buf->page;
853 	cs->offset = buf->offset;
854 
855 	err = lock_request(cs->req);
856 	if (err)
857 		return err;
858 
859 	return 1;
860 }
861 
862 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
863 			 unsigned offset, unsigned count)
864 {
865 	struct pipe_buffer *buf;
866 	int err;
867 
868 	if (cs->nr_segs == cs->pipe->buffers)
869 		return -EIO;
870 
871 	err = unlock_request(cs->req);
872 	if (err)
873 		return err;
874 
875 	fuse_copy_finish(cs);
876 
877 	buf = cs->pipebufs;
878 	get_page(page);
879 	buf->page = page;
880 	buf->offset = offset;
881 	buf->len = count;
882 
883 	cs->pipebufs++;
884 	cs->nr_segs++;
885 	cs->len = 0;
886 
887 	return 0;
888 }
889 
890 /*
891  * Copy a page in the request to/from the userspace buffer.  Must be
892  * done atomically
893  */
894 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
895 			  unsigned offset, unsigned count, int zeroing)
896 {
897 	int err;
898 	struct page *page = *pagep;
899 
900 	if (page && zeroing && count < PAGE_SIZE)
901 		clear_highpage(page);
902 
903 	while (count) {
904 		if (cs->write && cs->pipebufs && page) {
905 			return fuse_ref_page(cs, page, offset, count);
906 		} else if (!cs->len) {
907 			if (cs->move_pages && page &&
908 			    offset == 0 && count == PAGE_SIZE) {
909 				err = fuse_try_move_page(cs, pagep);
910 				if (err <= 0)
911 					return err;
912 			} else {
913 				err = fuse_copy_fill(cs);
914 				if (err)
915 					return err;
916 			}
917 		}
918 		if (page) {
919 			void *mapaddr = kmap_atomic(page);
920 			void *buf = mapaddr + offset;
921 			offset += fuse_copy_do(cs, &buf, &count);
922 			kunmap_atomic(mapaddr);
923 		} else
924 			offset += fuse_copy_do(cs, NULL, &count);
925 	}
926 	if (page && !cs->write)
927 		flush_dcache_page(page);
928 	return 0;
929 }
930 
931 /* Copy pages in the request to/from userspace buffer */
932 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
933 			   int zeroing)
934 {
935 	unsigned i;
936 	struct fuse_req *req = cs->req;
937 	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
938 
940 	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
941 		int err;
942 		unsigned int offset = ap->descs[i].offset;
943 		unsigned int count = min(nbytes, ap->descs[i].length);
944 
945 		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
946 		if (err)
947 			return err;
948 
949 		nbytes -= count;
950 	}
951 	return 0;
952 }
953 
954 /* Copy a single argument in the request to/from userspace buffer */
955 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
956 {
957 	while (size) {
958 		if (!cs->len) {
959 			int err = fuse_copy_fill(cs);
960 			if (err)
961 				return err;
962 		}
963 		fuse_copy_do(cs, &val, &size);
964 	}
965 	return 0;
966 }
967 
968 /* Copy request arguments to/from userspace buffer */
969 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
970 			  unsigned argpages, struct fuse_arg *args,
971 			  int zeroing)
972 {
973 	int err = 0;
974 	unsigned i;
975 
976 	for (i = 0; !err && i < numargs; i++)  {
977 		struct fuse_arg *arg = &args[i];
978 		if (i == numargs - 1 && argpages)
979 			err = fuse_copy_pages(cs, arg->size, zeroing);
980 		else
981 			err = fuse_copy_one(cs, arg->value, arg->size);
982 	}
983 	return err;
984 }
985 
986 static int forget_pending(struct fuse_iqueue *fiq)
987 {
988 	return fiq->forget_list_head.next != NULL;
989 }
990 
991 static int request_pending(struct fuse_iqueue *fiq)
992 {
993 	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
994 		forget_pending(fiq);
995 }
996 
997 /*
998  * Transfer an interrupt request to userspace
999  *
1000  * Unlike other requests, this one is assembled on demand, without a need
1001  * to allocate a separate fuse_req structure.
1002  *
1003  * Called with fiq->lock held, releases it
1004  */
1005 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
1006 			       struct fuse_copy_state *cs,
1007 			       size_t nbytes, struct fuse_req *req)
1008 __releases(fiq->lock)
1009 {
1010 	struct fuse_in_header ih;
1011 	struct fuse_interrupt_in arg;
1012 	unsigned reqsize = sizeof(ih) + sizeof(arg);
1013 	int err;
1014 
1015 	list_del_init(&req->intr_entry);
1016 	memset(&ih, 0, sizeof(ih));
1017 	memset(&arg, 0, sizeof(arg));
1018 	ih.len = reqsize;
1019 	ih.opcode = FUSE_INTERRUPT;
1020 	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
1021 	arg.unique = req->in.h.unique;
1022 
1023 	spin_unlock(&fiq->lock);
1024 	if (nbytes < reqsize)
1025 		return -EINVAL;
1026 
1027 	err = fuse_copy_one(cs, &ih, sizeof(ih));
1028 	if (!err)
1029 		err = fuse_copy_one(cs, &arg, sizeof(arg));
1030 	fuse_copy_finish(cs);
1031 
1032 	return err ? err : reqsize;
1033 }
1034 
1035 static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
1036 					       unsigned max,
1037 					       unsigned *countp)
1038 {
1039 	struct fuse_forget_link *head = fiq->forget_list_head.next;
1040 	struct fuse_forget_link **newhead = &head;
1041 	unsigned count;
1042 
1043 	for (count = 0; *newhead != NULL && count < max; count++)
1044 		newhead = &(*newhead)->next;
1045 
1046 	fiq->forget_list_head.next = *newhead;
1047 	*newhead = NULL;
1048 	if (fiq->forget_list_head.next == NULL)
1049 		fiq->forget_list_tail = &fiq->forget_list_head;
1050 
1051 	if (countp != NULL)
1052 		*countp = count;
1053 
1054 	return head;
1055 }
1056 
1057 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
1058 				   struct fuse_copy_state *cs,
1059 				   size_t nbytes)
1060 __releases(fiq->lock)
1061 {
1062 	int err;
1063 	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
1064 	struct fuse_forget_in arg = {
1065 		.nlookup = forget->forget_one.nlookup,
1066 	};
1067 	struct fuse_in_header ih = {
1068 		.opcode = FUSE_FORGET,
1069 		.nodeid = forget->forget_one.nodeid,
1070 		.unique = fuse_get_unique(fiq),
1071 		.len = sizeof(ih) + sizeof(arg),
1072 	};
1073 
1074 	spin_unlock(&fiq->lock);
1075 	kfree(forget);
1076 	if (nbytes < ih.len)
1077 		return -EINVAL;
1078 
1079 	err = fuse_copy_one(cs, &ih, sizeof(ih));
1080 	if (!err)
1081 		err = fuse_copy_one(cs, &arg, sizeof(arg));
1082 	fuse_copy_finish(cs);
1083 
1084 	if (err)
1085 		return err;
1086 
1087 	return ih.len;
1088 }
1089 
1090 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
1091 				   struct fuse_copy_state *cs, size_t nbytes)
1092 __releases(fiq->lock)
1093 {
1094 	int err;
1095 	unsigned max_forgets;
1096 	unsigned count;
1097 	struct fuse_forget_link *head;
1098 	struct fuse_batch_forget_in arg = { .count = 0 };
1099 	struct fuse_in_header ih = {
1100 		.opcode = FUSE_BATCH_FORGET,
1101 		.unique = fuse_get_unique(fiq),
1102 		.len = sizeof(ih) + sizeof(arg),
1103 	};
1104 
1105 	if (nbytes < ih.len) {
1106 		spin_unlock(&fiq->lock);
1107 		return -EINVAL;
1108 	}
1109 
1110 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1111 	head = dequeue_forget(fiq, max_forgets, &count);
1112 	spin_unlock(&fiq->lock);
1113 
1114 	arg.count = count;
1115 	ih.len += count * sizeof(struct fuse_forget_one);
1116 	err = fuse_copy_one(cs, &ih, sizeof(ih));
1117 	if (!err)
1118 		err = fuse_copy_one(cs, &arg, sizeof(arg));
1119 
1120 	while (head) {
1121 		struct fuse_forget_link *forget = head;
1122 
1123 		if (!err) {
1124 			err = fuse_copy_one(cs, &forget->forget_one,
1125 					    sizeof(forget->forget_one));
1126 		}
1127 		head = forget->next;
1128 		kfree(forget);
1129 	}
1130 
1131 	fuse_copy_finish(cs);
1132 
1133 	if (err)
1134 		return err;
1135 
1136 	return ih.len;
1137 }
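
/*
 * Resulting wire layout (editor's note), e.g. for count == 2; all sizes
 * are fixed by the UAPI in <linux/fuse.h>:
 *
 *	struct fuse_in_header        40 bytes  opcode FUSE_BATCH_FORGET,
 *	                                       len == 40 + 8 + 2*16 == 80
 *	struct fuse_batch_forget_in   8 bytes  count == 2
 *	struct fuse_forget_one       16 bytes  nodeid/nlookup of entry 1
 *	struct fuse_forget_one       16 bytes  nodeid/nlookup of entry 2
 */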
1138 
1139 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
1140 			    struct fuse_copy_state *cs,
1141 			    size_t nbytes)
1142 __releases(fiq->lock)
1143 {
1144 	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
1145 		return fuse_read_single_forget(fiq, cs, nbytes);
1146 	else
1147 		return fuse_read_batch_forget(fiq, cs, nbytes);
1148 }
1149 
1150 /*
1151  * Read a single request into the userspace filesystem's buffer.  This
1152  * function waits until a request is available, then removes it from
1153  * the pending list and copies request data to the userspace buffer.  If
1154  * no reply is needed (FORGET), or the request has been aborted, or there
1155  * was an error during the copying, then it is finished by calling
1156  * fuse_request_end().  Otherwise it is added to the processing list and
1157  * the 'sent' flag is set.
1158  */
1159 static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
1160 				struct fuse_copy_state *cs, size_t nbytes)
1161 {
1162 	ssize_t err;
1163 	struct fuse_conn *fc = fud->fc;
1164 	struct fuse_iqueue *fiq = &fc->iq;
1165 	struct fuse_pqueue *fpq = &fud->pq;
1166 	struct fuse_req *req;
1167 	struct fuse_args *args;
1168 	unsigned reqsize;
1169 	unsigned int hash;
1170 
1171 	/*
1172 	 * Require a sane minimum read buffer: one with capacity for the fixed
1173 	 * part of any request header plus negotiated max_write room for data.
1174 	 *
1175 	 * Historically libfuse reserves 4K for fixed header room, but e.g.
1176 	 * GlusterFS reserves only 80 bytes
1177 	 *
1178 	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
1179 	 *
1180 	 * which is the absolute minimum any sane filesystem should be using
1181 	 * for header room.
1182 	 */
1183 	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
1184 			   sizeof(struct fuse_in_header) +
1185 			   sizeof(struct fuse_write_in) +
1186 			   fc->max_write))
1187 		return -EINVAL;
1188 
1189  restart:
1190 	for (;;) {
1191 		spin_lock(&fiq->lock);
1192 		if (!fiq->connected || request_pending(fiq))
1193 			break;
1194 		spin_unlock(&fiq->lock);
1195 
1196 		if (file->f_flags & O_NONBLOCK)
1197 			return -EAGAIN;
1198 		err = wait_event_interruptible_exclusive(fiq->waitq,
1199 				!fiq->connected || request_pending(fiq));
1200 		if (err)
1201 			return err;
1202 	}
1203 
1204 	if (!fiq->connected) {
1205 		err = fc->aborted ? -ECONNABORTED : -ENODEV;
1206 		goto err_unlock;
1207 	}
1208 
1209 	if (!list_empty(&fiq->interrupts)) {
1210 		req = list_entry(fiq->interrupts.next, struct fuse_req,
1211 				 intr_entry);
1212 		return fuse_read_interrupt(fiq, cs, nbytes, req);
1213 	}
1214 
1215 	if (forget_pending(fiq)) {
1216 		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
1217 			return fuse_read_forget(fc, fiq, cs, nbytes);
1218 
1219 		if (fiq->forget_batch <= -8)
1220 			fiq->forget_batch = 16;
1221 	}
1222 
1223 	req = list_entry(fiq->pending.next, struct fuse_req, list);
1224 	clear_bit(FR_PENDING, &req->flags);
1225 	list_del_init(&req->list);
1226 	spin_unlock(&fiq->lock);
1227 
1228 	args = req->args;
1229 	reqsize = req->in.h.len;
1230 
1231 	/* If request is too large, reply with an error and restart the read */
1232 	if (nbytes < reqsize) {
1233 		req->out.h.error = -EIO;
1234 		/* SETXATTR is special, since it may contain too large data */
1235 		if (args->opcode == FUSE_SETXATTR)
1236 			req->out.h.error = -E2BIG;
1237 		fuse_request_end(fc, req);
1238 		goto restart;
1239 	}
1240 	spin_lock(&fpq->lock);
1241 	list_add(&req->list, &fpq->io);
1242 	spin_unlock(&fpq->lock);
1243 	cs->req = req;
1244 	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
1245 	if (!err)
1246 		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
1247 				     (struct fuse_arg *) args->in_args, 0);
1248 	fuse_copy_finish(cs);
1249 	spin_lock(&fpq->lock);
1250 	clear_bit(FR_LOCKED, &req->flags);
1251 	if (!fpq->connected) {
1252 		err = fc->aborted ? -ECONNABORTED : -ENODEV;
1253 		goto out_end;
1254 	}
1255 	if (err) {
1256 		req->out.h.error = -EIO;
1257 		goto out_end;
1258 	}
1259 	if (!test_bit(FR_ISREPLY, &req->flags)) {
1260 		err = reqsize;
1261 		goto out_end;
1262 	}
1263 	hash = fuse_req_hash(req->in.h.unique);
1264 	list_move_tail(&req->list, &fpq->processing[hash]);
1265 	__fuse_get_request(req);
1266 	set_bit(FR_SENT, &req->flags);
1267 	spin_unlock(&fpq->lock);
1268 	/* matches barrier in request_wait_answer() */
1269 	smp_mb__after_atomic();
1270 	if (test_bit(FR_INTERRUPTED, &req->flags))
1271 		queue_interrupt(fiq, req);
1272 	fuse_put_request(fc, req);
1273 
1274 	return reqsize;
1275 
1276 out_end:
1277 	if (!test_bit(FR_PRIVATE, &req->flags))
1278 		list_del_init(&req->list);
1279 	spin_unlock(&fpq->lock);
1280 	fuse_request_end(fc, req);
1281 	return err;
1282 
1283  err_unlock:
1284 	spin_unlock(&fiq->lock);
1285 	return err;
1286 }
1287 
1288 static int fuse_dev_open(struct inode *inode, struct file *file)
1289 {
1290 	/*
1291 	 * The fuse device file's private_data holds the fuse_conn(ection)
1292 	 * once mounted, and doubles as a flag for whether the file has
1293 	 * been mounted already.
1294 	 */
1295 	file->private_data = NULL;
1296 	return 0;
1297 }
1298 
1299 static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
1300 {
1301 	struct fuse_copy_state cs;
1302 	struct file *file = iocb->ki_filp;
1303 	struct fuse_dev *fud = fuse_get_dev(file);
1304 
1305 	if (!fud)
1306 		return -EPERM;
1307 
1308 	if (!iter_is_iovec(to))
1309 		return -EINVAL;
1310 
1311 	fuse_copy_init(&cs, 1, to);
1312 
1313 	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
1314 }
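
/*
 * Hedged userspace counterpart (a minimal sketch; libfuse does this
 * with more care).  The buffer must satisfy the size check in
 * fuse_dev_do_read(), i.e. at least max(FUSE_MIN_READ_BUFFER, header
 * room + max_write); the fixed buffer below assumes a small negotiated
 * max_write:
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *	for (;;) {
 *		ssize_t n = read(devfd, buf, sizeof(buf));
 *		if (n < 0 && errno == EINTR)
 *			continue;
 *		struct fuse_in_header *in = (struct fuse_in_header *)buf;
 *		// dispatch on in->opcode; reply using in->unique
 *	}
 */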
1315 
1316 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1317 				    struct pipe_inode_info *pipe,
1318 				    size_t len, unsigned int flags)
1319 {
1320 	int total, ret;
1321 	int page_nr = 0;
1322 	struct pipe_buffer *bufs;
1323 	struct fuse_copy_state cs;
1324 	struct fuse_dev *fud = fuse_get_dev(in);
1325 
1326 	if (!fud)
1327 		return -EPERM;
1328 
1329 	bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
1330 			      GFP_KERNEL);
1331 	if (!bufs)
1332 		return -ENOMEM;
1333 
1334 	fuse_copy_init(&cs, 1, NULL);
1335 	cs.pipebufs = bufs;
1336 	cs.pipe = pipe;
1337 	ret = fuse_dev_do_read(fud, in, &cs, len);
1338 	if (ret < 0)
1339 		goto out;
1340 
1341 	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1342 		ret = -EIO;
1343 		goto out;
1344 	}
1345 
1346 	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
1347 		/*
1348 		 * Need to be careful about this.  Having buf->ops in module
1349 		 * code can Oops if the buffer persists after module unload.
1350 		 */
1351 		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
1352 		bufs[page_nr].flags = 0;
1353 		ret = add_to_pipe(pipe, &bufs[page_nr++]);
1354 		if (unlikely(ret < 0))
1355 			break;
1356 	}
1357 	if (total)
1358 		ret = total;
1359 out:
1360 	for (; page_nr < cs.nr_segs; page_nr++)
1361 		put_page(bufs[page_nr].page);
1362 
1363 	kvfree(bufs);
1364 	return ret;
1365 }
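
/*
 * Userspace zero-copy sketch (editor's note): move a request into a
 * pipe instead of copying it through an intermediate buffer:
 *
 *	int p[2];
 *	pipe(p);
 *	ssize_t n = splice(devfd, NULL, p[1], NULL, bufsize, SPLICE_F_MOVE);
 *	// payload now sits in pipe buffers; forward with another splice()
 */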
1366 
1367 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1368 			    struct fuse_copy_state *cs)
1369 {
1370 	struct fuse_notify_poll_wakeup_out outarg;
1371 	int err = -EINVAL;
1372 
1373 	if (size != sizeof(outarg))
1374 		goto err;
1375 
1376 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1377 	if (err)
1378 		goto err;
1379 
1380 	fuse_copy_finish(cs);
1381 	return fuse_notify_poll_wakeup(fc, &outarg);
1382 
1383 err:
1384 	fuse_copy_finish(cs);
1385 	return err;
1386 }
1387 
1388 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1389 				   struct fuse_copy_state *cs)
1390 {
1391 	struct fuse_notify_inval_inode_out outarg;
1392 	int err = -EINVAL;
1393 
1394 	if (size != sizeof(outarg))
1395 		goto err;
1396 
1397 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1398 	if (err)
1399 		goto err;
1400 	fuse_copy_finish(cs);
1401 
1402 	down_read(&fc->killsb);
1403 	err = -ENOENT;
1404 	if (fc->sb) {
1405 		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1406 					       outarg.off, outarg.len);
1407 	}
1408 	up_read(&fc->killsb);
1409 	return err;
1410 
1411 err:
1412 	fuse_copy_finish(cs);
1413 	return err;
1414 }
1415 
1416 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1417 				   struct fuse_copy_state *cs)
1418 {
1419 	struct fuse_notify_inval_entry_out outarg;
1420 	int err = -ENOMEM;
1421 	char *buf;
1422 	struct qstr name;
1423 
1424 	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1425 	if (!buf)
1426 		goto err;
1427 
1428 	err = -EINVAL;
1429 	if (size < sizeof(outarg))
1430 		goto err;
1431 
1432 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1433 	if (err)
1434 		goto err;
1435 
1436 	err = -ENAMETOOLONG;
1437 	if (outarg.namelen > FUSE_NAME_MAX)
1438 		goto err;
1439 
1440 	err = -EINVAL;
1441 	if (size != sizeof(outarg) + outarg.namelen + 1)
1442 		goto err;
1443 
1444 	name.name = buf;
1445 	name.len = outarg.namelen;
1446 	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1447 	if (err)
1448 		goto err;
1449 	fuse_copy_finish(cs);
1450 	buf[outarg.namelen] = 0;
1451 
1452 	down_read(&fc->killsb);
1453 	err = -ENOENT;
1454 	if (fc->sb)
1455 		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1456 	up_read(&fc->killsb);
1457 	kfree(buf);
1458 	return err;
1459 
1460 err:
1461 	kfree(buf);
1462 	fuse_copy_finish(cs);
1463 	return err;
1464 }
1465 
1466 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1467 			      struct fuse_copy_state *cs)
1468 {
1469 	struct fuse_notify_delete_out outarg;
1470 	int err = -ENOMEM;
1471 	char *buf;
1472 	struct qstr name;
1473 
1474 	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1475 	if (!buf)
1476 		goto err;
1477 
1478 	err = -EINVAL;
1479 	if (size < sizeof(outarg))
1480 		goto err;
1481 
1482 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1483 	if (err)
1484 		goto err;
1485 
1486 	err = -ENAMETOOLONG;
1487 	if (outarg.namelen > FUSE_NAME_MAX)
1488 		goto err;
1489 
1490 	err = -EINVAL;
1491 	if (size != sizeof(outarg) + outarg.namelen + 1)
1492 		goto err;
1493 
1494 	name.name = buf;
1495 	name.len = outarg.namelen;
1496 	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1497 	if (err)
1498 		goto err;
1499 	fuse_copy_finish(cs);
1500 	buf[outarg.namelen] = 0;
1501 
1502 	down_read(&fc->killsb);
1503 	err = -ENOENT;
1504 	if (fc->sb)
1505 		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1506 					       outarg.child, &name);
1507 	up_read(&fc->killsb);
1508 	kfree(buf);
1509 	return err;
1510 
1511 err:
1512 	kfree(buf);
1513 	fuse_copy_finish(cs);
1514 	return err;
1515 }
1516 
1517 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1518 			     struct fuse_copy_state *cs)
1519 {
1520 	struct fuse_notify_store_out outarg;
1521 	struct inode *inode;
1522 	struct address_space *mapping;
1523 	u64 nodeid;
1524 	int err;
1525 	pgoff_t index;
1526 	unsigned int offset;
1527 	unsigned int num;
1528 	loff_t file_size;
1529 	loff_t end;
1530 
1531 	err = -EINVAL;
1532 	if (size < sizeof(outarg))
1533 		goto out_finish;
1534 
1535 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1536 	if (err)
1537 		goto out_finish;
1538 
1539 	err = -EINVAL;
1540 	if (size - sizeof(outarg) != outarg.size)
1541 		goto out_finish;
1542 
1543 	nodeid = outarg.nodeid;
1544 
1545 	down_read(&fc->killsb);
1546 
1547 	err = -ENOENT;
1548 	if (!fc->sb)
1549 		goto out_up_killsb;
1550 
1551 	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1552 	if (!inode)
1553 		goto out_up_killsb;
1554 
1555 	mapping = inode->i_mapping;
1556 	index = outarg.offset >> PAGE_SHIFT;
1557 	offset = outarg.offset & ~PAGE_MASK;
1558 	file_size = i_size_read(inode);
1559 	end = outarg.offset + outarg.size;
1560 	if (end > file_size) {
1561 		file_size = end;
1562 		fuse_write_update_size(inode, file_size);
1563 	}
1564 
1565 	num = outarg.size;
1566 	while (num) {
1567 		struct page *page;
1568 		unsigned int this_num;
1569 
1570 		err = -ENOMEM;
1571 		page = find_or_create_page(mapping, index,
1572 					   mapping_gfp_mask(mapping));
1573 		if (!page)
1574 			goto out_iput;
1575 
1576 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1577 		err = fuse_copy_page(cs, &page, offset, this_num, 0);
1578 		if (!err && offset == 0 &&
1579 		    (this_num == PAGE_SIZE || file_size == end))
1580 			SetPageUptodate(page);
1581 		unlock_page(page);
1582 		put_page(page);
1583 
1584 		if (err)
1585 			goto out_iput;
1586 
1587 		num -= this_num;
1588 		offset = 0;
1589 		index++;
1590 	}
1591 
1592 	err = 0;
1593 
1594 out_iput:
1595 	iput(inode);
1596 out_up_killsb:
1597 	up_read(&fc->killsb);
1598 out_finish:
1599 	fuse_copy_finish(cs);
1600 	return err;
1601 }
1602 
1603 struct fuse_retrieve_args {
1604 	struct fuse_args_pages ap;
1605 	struct fuse_notify_retrieve_in inarg;
1606 };
1607 
1608 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_args *args,
1609 			      int error)
1610 {
1611 	struct fuse_retrieve_args *ra =
1612 		container_of(args, typeof(*ra), ap.args);
1613 
1614 	release_pages(ra->ap.pages, ra->ap.num_pages);
1615 	kfree(ra);
1616 }
1617 
1618 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1619 			 struct fuse_notify_retrieve_out *outarg)
1620 {
1621 	int err;
1622 	struct address_space *mapping = inode->i_mapping;
1623 	pgoff_t index;
1624 	loff_t file_size;
1625 	unsigned int num;
1626 	unsigned int offset;
1627 	size_t total_len = 0;
1628 	unsigned int num_pages;
1629 	struct fuse_retrieve_args *ra;
1630 	size_t args_size = sizeof(*ra);
1631 	struct fuse_args_pages *ap;
1632 	struct fuse_args *args;
1633 
1634 	offset = outarg->offset & ~PAGE_MASK;
1635 	file_size = i_size_read(inode);
1636 
1637 	num = min(outarg->size, fc->max_write);
1638 	if (outarg->offset > file_size)
1639 		num = 0;
1640 	else if (outarg->offset + num > file_size)
1641 		num = file_size - outarg->offset;
1642 
1643 	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1644 	num_pages = min(num_pages, fc->max_pages);
1645 
1646 	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
1647 
1648 	ra = kzalloc(args_size, GFP_KERNEL);
1649 	if (!ra)
1650 		return -ENOMEM;
1651 
1652 	ap = &ra->ap;
1653 	ap->pages = (void *) (ra + 1);
1654 	ap->descs = (void *) (ap->pages + num_pages);
1655 
1656 	args = &ap->args;
1657 	args->nodeid = outarg->nodeid;
1658 	args->opcode = FUSE_NOTIFY_REPLY;
1659 	args->in_numargs = 2;
1660 	args->in_pages = true;
1661 	args->end = fuse_retrieve_end;
1662 
1663 	index = outarg->offset >> PAGE_SHIFT;
1664 
1665 	while (num && ap->num_pages < num_pages) {
1666 		struct page *page;
1667 		unsigned int this_num;
1668 
1669 		page = find_get_page(mapping, index);
1670 		if (!page)
1671 			break;
1672 
1673 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1674 		ap->pages[ap->num_pages] = page;
1675 		ap->descs[ap->num_pages].offset = offset;
1676 		ap->descs[ap->num_pages].length = this_num;
1677 		ap->num_pages++;
1678 
1679 		offset = 0;
1680 		num -= this_num;
1681 		total_len += this_num;
1682 		index++;
1683 	}
1684 	ra->inarg.offset = outarg->offset;
1685 	ra->inarg.size = total_len;
1686 	args->in_args[0].size = sizeof(ra->inarg);
1687 	args->in_args[0].value = &ra->inarg;
1688 	args->in_args[1].size = total_len;
1689 
1690 	err = fuse_simple_notify_reply(fc, args, outarg->notify_unique);
1691 	if (err)
1692 		fuse_retrieve_end(fc, args, err);
1693 
1694 	return err;
1695 }
1696 
1697 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1698 				struct fuse_copy_state *cs)
1699 {
1700 	struct fuse_notify_retrieve_out outarg;
1701 	struct inode *inode;
1702 	int err;
1703 
1704 	err = -EINVAL;
1705 	if (size != sizeof(outarg))
1706 		goto copy_finish;
1707 
1708 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1709 	if (err)
1710 		goto copy_finish;
1711 
1712 	fuse_copy_finish(cs);
1713 
1714 	down_read(&fc->killsb);
1715 	err = -ENOENT;
1716 	if (fc->sb) {
1717 		u64 nodeid = outarg.nodeid;
1718 
1719 		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1720 		if (inode) {
1721 			err = fuse_retrieve(fc, inode, &outarg);
1722 			iput(inode);
1723 		}
1724 	}
1725 	up_read(&fc->killsb);
1726 
1727 	return err;
1728 
1729 copy_finish:
1730 	fuse_copy_finish(cs);
1731 	return err;
1732 }
1733 
1734 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1735 		       unsigned int size, struct fuse_copy_state *cs)
1736 {
1737 	/* Don't try to move pages (yet) */
1738 	cs->move_pages = 0;
1739 
1740 	switch (code) {
1741 	case FUSE_NOTIFY_POLL:
1742 		return fuse_notify_poll(fc, size, cs);
1743 
1744 	case FUSE_NOTIFY_INVAL_INODE:
1745 		return fuse_notify_inval_inode(fc, size, cs);
1746 
1747 	case FUSE_NOTIFY_INVAL_ENTRY:
1748 		return fuse_notify_inval_entry(fc, size, cs);
1749 
1750 	case FUSE_NOTIFY_STORE:
1751 		return fuse_notify_store(fc, size, cs);
1752 
1753 	case FUSE_NOTIFY_RETRIEVE:
1754 		return fuse_notify_retrieve(fc, size, cs);
1755 
1756 	case FUSE_NOTIFY_DELETE:
1757 		return fuse_notify_delete(fc, size, cs);
1758 
1759 	default:
1760 		fuse_copy_finish(cs);
1761 		return -EINVAL;
1762 	}
1763 }
1764 
1765 /* Look up request on processing list by unique ID */
1766 static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
1767 {
1768 	unsigned int hash = fuse_req_hash(unique);
1769 	struct fuse_req *req;
1770 
1771 	list_for_each_entry(req, &fpq->processing[hash], list) {
1772 		if (req->in.h.unique == unique)
1773 			return req;
1774 	}
1775 	return NULL;
1776 }
1777 
1778 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
1779 			 unsigned nbytes)
1780 {
1781 	unsigned reqsize = sizeof(struct fuse_out_header);
1782 
1783 	reqsize += fuse_len_args(args->out_numargs, args->out_args);
1784 
1785 	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
1786 		return -EINVAL;
1787 	else if (reqsize > nbytes) {
1788 		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
1789 		unsigned diffsize = reqsize - nbytes;
1790 
1791 		if (diffsize > lastarg->size)
1792 			return -EINVAL;
1793 		lastarg->size -= diffsize;
1794 	}
1795 	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
1796 			      args->out_args, args->page_zeroing);
1797 }
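
/*
 * Worked example of the trimming above (editor's note): a READ with
 * out_argvar set asks for 4096 bytes, so reqsize == 16 + 4096
 * (fuse_out_header is 16 bytes).  If the server replies with header
 * plus only 1000 bytes, nbytes == 1016, diffsize == 3096 fits within
 * lastarg->size, and the last argument is shortened to the 1000 bytes
 * actually returned.
 */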
1798 
1799 /*
1800  * Write a single reply to a request.  First the header is copied from
1801  * the write buffer.  The request is then looked up on the processing
1802  * list by the unique ID found in the header.  If found, it is removed
1803  * from the list and the rest of the buffer is copied to the request.
1804  * The request is finished by calling fuse_request_end().
1805  */
1806 static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
1807 				 struct fuse_copy_state *cs, size_t nbytes)
1808 {
1809 	int err;
1810 	struct fuse_conn *fc = fud->fc;
1811 	struct fuse_pqueue *fpq = &fud->pq;
1812 	struct fuse_req *req;
1813 	struct fuse_out_header oh;
1814 
1815 	err = -EINVAL;
1816 	if (nbytes < sizeof(struct fuse_out_header))
1817 		goto out;
1818 
1819 	err = fuse_copy_one(cs, &oh, sizeof(oh));
1820 	if (err)
1821 		goto copy_finish;
1822 
1823 	err = -EINVAL;
1824 	if (oh.len != nbytes)
1825 		goto copy_finish;
1826 
1827 	/*
1828 	 * A zero oh.unique indicates an unsolicited notification message,
1829 	 * in which case oh.error carries the notification code.
1830 	 */
1831 	if (!oh.unique) {
1832 		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1833 		goto out;
1834 	}
1835 
1836 	err = -EINVAL;
1837 	if (oh.error <= -1000 || oh.error > 0)
1838 		goto copy_finish;
1839 
1840 	spin_lock(&fpq->lock);
1841 	req = NULL;
1842 	if (fpq->connected)
1843 		req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
1844 
1845 	err = -ENOENT;
1846 	if (!req) {
1847 		spin_unlock(&fpq->lock);
1848 		goto copy_finish;
1849 	}
1850 
1851 	/* Is it an interrupt reply ID? */
1852 	if (oh.unique & FUSE_INT_REQ_BIT) {
1853 		__fuse_get_request(req);
1854 		spin_unlock(&fpq->lock);
1855 
1856 		err = 0;
1857 		if (nbytes != sizeof(struct fuse_out_header))
1858 			err = -EINVAL;
1859 		else if (oh.error == -ENOSYS)
1860 			fc->no_interrupt = 1;
1861 		else if (oh.error == -EAGAIN)
1862 			err = queue_interrupt(&fc->iq, req);
1863 
1864 		fuse_put_request(fc, req);
1865 
1866 		goto copy_finish;
1867 	}
1868 
1869 	clear_bit(FR_SENT, &req->flags);
1870 	list_move(&req->list, &fpq->io);
1871 	req->out.h = oh;
1872 	set_bit(FR_LOCKED, &req->flags);
1873 	spin_unlock(&fpq->lock);
1874 	cs->req = req;
1875 	if (!req->args->page_replace)
1876 		cs->move_pages = 0;
1877 
1878 	if (oh.error)
1879 		err = nbytes != sizeof(oh) ? -EINVAL : 0;
1880 	else
1881 		err = copy_out_args(cs, req->args, nbytes);
1882 	fuse_copy_finish(cs);
1883 
1884 	spin_lock(&fpq->lock);
1885 	clear_bit(FR_LOCKED, &req->flags);
1886 	if (!fpq->connected)
1887 		err = -ENOENT;
1888 	else if (err)
1889 		req->out.h.error = -EIO;
1890 	if (!test_bit(FR_PRIVATE, &req->flags))
1891 		list_del_init(&req->list);
1892 	spin_unlock(&fpq->lock);
1893 
1894 	fuse_request_end(fc, req);
1895 out:
1896 	return err ? err : nbytes;
1897 
1898 copy_finish:
1899 	fuse_copy_finish(cs);
1900 	goto out;
1901 }
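
/*
 * Hedged userspace counterpart: a reply is a single write whose first
 * bytes are a fuse_out_header echoing the request's unique ID; oh.len
 * must equal the total number of bytes written:
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh) + payload_len,
 *		.error  = 0,
 *		.unique = in->unique,	// from the request header read earlier
 *	};
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) },
 *		{ payload, payload_len },
 *	};
 *	writev(devfd, iov, 2);
 */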
1902 
1903 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
1904 {
1905 	struct fuse_copy_state cs;
1906 	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
1907 
1908 	if (!fud)
1909 		return -EPERM;
1910 
1911 	if (!iter_is_iovec(from))
1912 		return -EINVAL;
1913 
1914 	fuse_copy_init(&cs, 0, from);
1915 
1916 	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
1917 }
1918 
1919 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1920 				     struct file *out, loff_t *ppos,
1921 				     size_t len, unsigned int flags)
1922 {
1923 	unsigned nbuf;
1924 	unsigned idx;
1925 	struct pipe_buffer *bufs;
1926 	struct fuse_copy_state cs;
1927 	struct fuse_dev *fud;
1928 	size_t rem;
1929 	ssize_t ret;
1930 
1931 	fud = fuse_get_dev(out);
1932 	if (!fud)
1933 		return -EPERM;
1934 
1935 	pipe_lock(pipe);
1936 
1937 	bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
1938 			      GFP_KERNEL);
1939 	if (!bufs) {
1940 		pipe_unlock(pipe);
1941 		return -ENOMEM;
1942 	}
1943 
1944 	nbuf = 0;
1945 	rem = 0;
1946 	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1947 		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1948 
1949 	ret = -EINVAL;
1950 	if (rem < len)
1951 		goto out_free;
1952 
1953 	rem = len;
1954 	while (rem) {
1955 		struct pipe_buffer *ibuf;
1956 		struct pipe_buffer *obuf;
1957 
1958 		BUG_ON(nbuf >= pipe->buffers);
1959 		BUG_ON(!pipe->nrbufs);
1960 		ibuf = &pipe->bufs[pipe->curbuf];
1961 		obuf = &bufs[nbuf];
1962 
1963 		if (rem >= ibuf->len) {
1964 			*obuf = *ibuf;
1965 			ibuf->ops = NULL;
1966 			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1967 			pipe->nrbufs--;
1968 		} else {
1969 			if (!pipe_buf_get(pipe, ibuf))
1970 				goto out_free;
1971 
1972 			*obuf = *ibuf;
1973 			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1974 			obuf->len = rem;
1975 			ibuf->offset += obuf->len;
1976 			ibuf->len -= obuf->len;
1977 		}
1978 		nbuf++;
1979 		rem -= obuf->len;
1980 	}
1981 	pipe_unlock(pipe);
1982 
1983 	fuse_copy_init(&cs, 0, NULL);
1984 	cs.pipebufs = bufs;
1985 	cs.nr_segs = nbuf;
1986 	cs.pipe = pipe;
1987 
1988 	if (flags & SPLICE_F_MOVE)
1989 		cs.move_pages = 1;
1990 
1991 	ret = fuse_dev_do_write(fud, &cs, len);
1992 
1993 	pipe_lock(pipe);
1994 out_free:
1995 	for (idx = 0; idx < nbuf; idx++)
1996 		pipe_buf_release(pipe, &bufs[idx]);
1997 	pipe_unlock(pipe);
1998 
1999 	kvfree(bufs);
2000 	return ret;
2001 }
2002 
2003 static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
2004 {
2005 	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
2006 	struct fuse_iqueue *fiq;
2007 	struct fuse_dev *fud = fuse_get_dev(file);
2008 
2009 	if (!fud)
2010 		return EPOLLERR;
2011 
2012 	fiq = &fud->fc->iq;
2013 	poll_wait(file, &fiq->waitq, wait);
2014 
2015 	spin_lock(&fiq->lock);
2016 	if (!fiq->connected)
2017 		mask = EPOLLERR;
2018 	else if (request_pending(fiq))
2019 		mask |= EPOLLIN | EPOLLRDNORM;
2020 	spin_unlock(&fiq->lock);
2021 
2022 	return mask;
2023 }
2024 
2025 /* Abort all requests on the given list (pending or processing) */
2026 static void end_requests(struct fuse_conn *fc, struct list_head *head)
2027 {
2028 	while (!list_empty(head)) {
2029 		struct fuse_req *req;
2030 		req = list_entry(head->next, struct fuse_req, list);
2031 		req->out.h.error = -ECONNABORTED;
2032 		clear_bit(FR_SENT, &req->flags);
2033 		list_del_init(&req->list);
2034 		fuse_request_end(fc, req);
2035 	}
2036 }
2037 
2038 static void end_polls(struct fuse_conn *fc)
2039 {
2040 	struct rb_node *p;
2041 
2042 	p = rb_first(&fc->polled_files);
2043 
2044 	while (p) {
2045 		struct fuse_file *ff;
2046 		ff = rb_entry(p, struct fuse_file, polled_node);
2047 		wake_up_interruptible_all(&ff->poll_wait);
2048 
2049 		p = rb_next(p);
2050 	}
2051 }
2052 
2053 /*
2054  * Abort all requests.
2055  *
2056  * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2057  * filesystem.
2058  *
2059  * The same effect is usually achievable through killing the filesystem daemon
2060  * and all users of the filesystem.  The exception is the combination of an
2061  * asynchronous request and the tricky deadlock (see
2062  * Documentation/filesystems/fuse.txt).
2063  *
2064  * Aborting requests under I/O goes as follows: 1: Separate out unlocked
2065  * requests; they should be finished off immediately.  Locked requests will be
2066  * finished after unlock; see unlock_request(). 2: Finish off the unlocked
2067  * requests.  It is possible that some request will finish before we can.  This
2068  * is OK; in that case the request will be removed from the list before we
2069  * touch it.
2070  */
2071 void fuse_abort_conn(struct fuse_conn *fc)
2072 {
2073 	struct fuse_iqueue *fiq = &fc->iq;
2074 
2075 	spin_lock(&fc->lock);
2076 	if (fc->connected) {
2077 		struct fuse_dev *fud;
2078 		struct fuse_req *req, *next;
2079 		LIST_HEAD(to_end);
2080 		unsigned int i;
2081 
2082 		/* Background queuing checks fc->connected under bg_lock */
2083 		spin_lock(&fc->bg_lock);
2084 		fc->connected = 0;
2085 		spin_unlock(&fc->bg_lock);
2086 
2087 		fuse_set_initialized(fc);
2088 		list_for_each_entry(fud, &fc->devices, entry) {
2089 			struct fuse_pqueue *fpq = &fud->pq;
2090 
2091 			spin_lock(&fpq->lock);
2092 			fpq->connected = 0;
2093 			list_for_each_entry_safe(req, next, &fpq->io, list) {
2094 				req->out.h.error = -ECONNABORTED;
2095 				spin_lock(&req->waitq.lock);
2096 				set_bit(FR_ABORTED, &req->flags);
2097 				if (!test_bit(FR_LOCKED, &req->flags)) {
2098 					set_bit(FR_PRIVATE, &req->flags);
2099 					__fuse_get_request(req);
2100 					list_move(&req->list, &to_end);
2101 				}
2102 				spin_unlock(&req->waitq.lock);
2103 			}
2104 			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
2105 				list_splice_tail_init(&fpq->processing[i],
2106 						      &to_end);
2107 			spin_unlock(&fpq->lock);
2108 		}
2109 		spin_lock(&fc->bg_lock);
2110 		fc->blocked = 0;
2111 		fc->max_background = UINT_MAX;
2112 		flush_bg_queue(fc);
2113 		spin_unlock(&fc->bg_lock);
2114 
2115 		spin_lock(&fiq->lock);
2116 		fiq->connected = 0;
2117 		list_for_each_entry(req, &fiq->pending, list)
2118 			clear_bit(FR_PENDING, &req->flags);
2119 		list_splice_tail_init(&fiq->pending, &to_end);
2120 		while (forget_pending(fiq))
2121 			kfree(dequeue_forget(fiq, 1, NULL));
2122 		wake_up_all(&fiq->waitq);
2123 		spin_unlock(&fiq->lock);
2124 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2125 		end_polls(fc);
2126 		wake_up_all(&fc->blocked_waitq);
2127 		spin_unlock(&fc->lock);
2128 
2129 		end_requests(fc, &to_end);
2130 	} else {
2131 		spin_unlock(&fc->lock);
2132 	}
2133 }
2134 EXPORT_SYMBOL_GPL(fuse_abort_conn);
2135 
2136 void fuse_wait_aborted(struct fuse_conn *fc)
2137 {
2138 	/* matches implicit memory barrier in fuse_drop_waiting() */
2139 	smp_mb();
2140 	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
2141 }
2142 
2143 int fuse_dev_release(struct inode *inode, struct file *file)
2144 {
2145 	struct fuse_dev *fud = fuse_get_dev(file);
2146 
2147 	if (fud) {
2148 		struct fuse_conn *fc = fud->fc;
2149 		struct fuse_pqueue *fpq = &fud->pq;
2150 		LIST_HEAD(to_end);
2151 		unsigned int i;
2152 
2153 		spin_lock(&fpq->lock);
2154 		WARN_ON(!list_empty(&fpq->io));
2155 		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
2156 			list_splice_init(&fpq->processing[i], &to_end);
2157 		spin_unlock(&fpq->lock);
2158 
2159 		end_requests(fc, &to_end);
2160 
2161 		/* Are we the last open device? */
2162 		if (atomic_dec_and_test(&fc->dev_count)) {
2163 			WARN_ON(fc->iq.fasync != NULL);
2164 			fuse_abort_conn(fc);
2165 		}
2166 		fuse_dev_free(fud);
2167 	}
2168 	return 0;
2169 }
2170 EXPORT_SYMBOL_GPL(fuse_dev_release);
2171 
2172 static int fuse_dev_fasync(int fd, struct file *file, int on)
2173 {
2174 	struct fuse_dev *fud = fuse_get_dev(file);
2175 
2176 	if (!fud)
2177 		return -EPERM;
2178 
2179 	/* No locking - fasync_helper does its own locking */
2180 	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
2181 }
2182 
2183 static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
2184 {
2185 	struct fuse_dev *fud;
2186 
2187 	if (new->private_data)
2188 		return -EINVAL;
2189 
2190 	fud = fuse_dev_alloc(fc);
2191 	if (!fud)
2192 		return -ENOMEM;
2193 
2194 	new->private_data = fud;
2195 	atomic_inc(&fc->dev_count);
2196 
2197 	return 0;
2198 }
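
/*
 * Userspace usage sketch (editor's note): clone an existing session
 * onto a fresh fd so multiple threads can service the same mount:
 *
 *	int newfd = open("/dev/fuse", O_RDWR);
 *	uint32_t oldfd = session_fd;	// an already-mounted /dev/fuse fd
 *	ioctl(newfd, FUSE_DEV_IOC_CLONE, &oldfd);
 */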
2199 
2200 static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2201 			   unsigned long arg)
2202 {
2203 	int err = -ENOTTY;
2204 
2205 	if (cmd == FUSE_DEV_IOC_CLONE) {
2206 		int oldfd;
2207 
2208 		err = -EFAULT;
2209 		if (!get_user(oldfd, (__u32 __user *) arg)) {
2210 			struct file *old = fget(oldfd);
2211 
2212 			err = -EINVAL;
2213 			if (old) {
2214 				struct fuse_dev *fud = NULL;
2215 
2216 				/*
2217 				 * Check against file->f_op because CUSE
2218 				 * uses the same ioctl handler.
2219 				 */
2220 				if (old->f_op == file->f_op &&
2221 				    old->f_cred->user_ns == file->f_cred->user_ns)
2222 					fud = fuse_get_dev(old);
2223 
2224 				if (fud) {
2225 					mutex_lock(&fuse_mutex);
2226 					err = fuse_device_clone(fud->fc, file);
2227 					mutex_unlock(&fuse_mutex);
2228 				}
2229 				fput(old);
2230 			}
2231 		}
2232 	}
2233 	return err;
2234 }
2235 
2236 const struct file_operations fuse_dev_operations = {
2237 	.owner		= THIS_MODULE,
2238 	.open		= fuse_dev_open,
2239 	.llseek		= no_llseek,
2240 	.read_iter	= fuse_dev_read,
2241 	.splice_read	= fuse_dev_splice_read,
2242 	.write_iter	= fuse_dev_write,
2243 	.splice_write	= fuse_dev_splice_write,
2244 	.poll		= fuse_dev_poll,
2245 	.release	= fuse_dev_release,
2246 	.fasync		= fuse_dev_fasync,
2247 	.unlocked_ioctl = fuse_dev_ioctl,
2248 	.compat_ioctl   = fuse_dev_ioctl,
2249 };
2250 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2251 
2252 static struct miscdevice fuse_miscdevice = {
2253 	.minor = FUSE_MINOR,
2254 	.name  = "fuse",
2255 	.fops = &fuse_dev_operations,
2256 };
2257 
2258 int __init fuse_dev_init(void)
2259 {
2260 	int err = -ENOMEM;
2261 	fuse_req_cachep = kmem_cache_create("fuse_request",
2262 					    sizeof(struct fuse_req),
2263 					    0, 0, NULL);
2264 	if (!fuse_req_cachep)
2265 		goto out;
2266 
2267 	err = misc_register(&fuse_miscdevice);
2268 	if (err)
2269 		goto out_cache_clean;
2270 
2271 	return 0;
2272 
2273  out_cache_clean:
2274 	kmem_cache_destroy(fuse_req_cachep);
2275  out:
2276 	return err;
2277 }
2278 
2279 void fuse_dev_cleanup(void)
2280 {
2281 	misc_deregister(&fuse_miscdevice);
2282 	kmem_cache_destroy(fuse_req_cachep);
2283 }
2284