xref: /openbmc/linux/fs/fuse/dev.c (revision e1e38ea1)
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4 
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8 
9 #include "fuse_i.h"
10 
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/sched/signal.h>
15 #include <linux/uio.h>
16 #include <linux/miscdevice.h>
17 #include <linux/pagemap.h>
18 #include <linux/file.h>
19 #include <linux/slab.h>
20 #include <linux/pipe_fs_i.h>
21 #include <linux/swap.h>
22 #include <linux/splice.h>
23 #include <linux/sched.h>
24 
25 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
26 MODULE_ALIAS("devname:fuse");
27 
28 static struct kmem_cache *fuse_req_cachep;
29 
30 static struct fuse_dev *fuse_get_dev(struct file *file)
31 {
32 	/*
33 	 * Lockless access is OK, because file->private data is set
34 	 * once during mount and is valid until the file is released.
35 	 */
36 	return READ_ONCE(file->private_data);
37 }
38 
39 static void fuse_request_init(struct fuse_req *req, struct page **pages,
40 			      struct fuse_page_desc *page_descs,
41 			      unsigned npages)
42 {
43 	memset(req, 0, sizeof(*req));
44 	memset(pages, 0, sizeof(*pages) * npages);
45 	memset(page_descs, 0, sizeof(*page_descs) * npages);
46 	INIT_LIST_HEAD(&req->list);
47 	INIT_LIST_HEAD(&req->intr_entry);
48 	init_waitqueue_head(&req->waitq);
49 	refcount_set(&req->count, 1);
50 	req->pages = pages;
51 	req->page_descs = page_descs;
52 	req->max_pages = npages;
53 	__set_bit(FR_PENDING, &req->flags);
54 }
55 
56 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
57 {
58 	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
59 	if (req) {
60 		struct page **pages;
61 		struct fuse_page_desc *page_descs;
62 
63 		if (npages <= FUSE_REQ_INLINE_PAGES) {
64 			pages = req->inline_pages;
65 			page_descs = req->inline_page_descs;
66 		} else {
67 			pages = kmalloc_array(npages, sizeof(struct page *),
68 					      flags);
69 			page_descs =
70 				kmalloc_array(npages,
71 					      sizeof(struct fuse_page_desc),
72 					      flags);
73 		}
74 
75 		if (!pages || !page_descs) {
76 			kfree(pages);
77 			kfree(page_descs);
78 			kmem_cache_free(fuse_req_cachep, req);
79 			return NULL;
80 		}
81 
82 		fuse_request_init(req, pages, page_descs, npages);
83 	}
84 	return req;
85 }
86 
87 struct fuse_req *fuse_request_alloc(unsigned npages)
88 {
89 	return __fuse_request_alloc(npages, GFP_KERNEL);
90 }
91 EXPORT_SYMBOL_GPL(fuse_request_alloc);
92 
93 struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
94 {
95 	return __fuse_request_alloc(npages, GFP_NOFS);
96 }
97 
98 void fuse_request_free(struct fuse_req *req)
99 {
100 	if (req->pages != req->inline_pages) {
101 		kfree(req->pages);
102 		kfree(req->page_descs);
103 	}
104 	kmem_cache_free(fuse_req_cachep, req);
105 }
106 
107 void __fuse_get_request(struct fuse_req *req)
108 {
109 	refcount_inc(&req->count);
110 }
111 
112 /* Must be called with > 1 refcount */
113 static void __fuse_put_request(struct fuse_req *req)
114 {
115 	refcount_dec(&req->count);
116 }
117 
118 void fuse_set_initialized(struct fuse_conn *fc)
119 {
120 	/* Make sure stores before this are seen on another CPU */
121 	smp_wmb();
122 	fc->initialized = 1;
123 }
124 
125 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
126 {
127 	return !fc->initialized || (for_background && fc->blocked);
128 }
129 
130 static void fuse_drop_waiting(struct fuse_conn *fc)
131 {
132 	if (fc->connected) {
133 		atomic_dec(&fc->num_waiting);
134 	} else if (atomic_dec_and_test(&fc->num_waiting)) {
135 		/* wake up aborters */
136 		wake_up_all(&fc->blocked_waitq);
137 	}
138 }
139 
140 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
141 				       bool for_background)
142 {
143 	struct fuse_req *req;
144 	int err;
145 	atomic_inc(&fc->num_waiting);
146 
147 	if (fuse_block_alloc(fc, for_background)) {
148 		err = -EINTR;
149 		if (wait_event_killable_exclusive(fc->blocked_waitq,
150 				!fuse_block_alloc(fc, for_background)))
151 			goto out;
152 	}
153 	/* Matches smp_wmb() in fuse_set_initialized() */
154 	smp_rmb();
155 
156 	err = -ENOTCONN;
157 	if (!fc->connected)
158 		goto out;
159 
160 	err = -ECONNREFUSED;
161 	if (fc->conn_error)
162 		goto out;
163 
164 	req = fuse_request_alloc(npages);
165 	err = -ENOMEM;
166 	if (!req) {
167 		if (for_background)
168 			wake_up(&fc->blocked_waitq);
169 		goto out;
170 	}
171 
172 	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
173 	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
174 	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
175 
176 	__set_bit(FR_WAITING, &req->flags);
177 	if (for_background)
178 		__set_bit(FR_BACKGROUND, &req->flags);
179 
180 	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
181 		     req->in.h.gid == ((gid_t)-1))) {
182 		fuse_put_request(fc, req);
183 		return ERR_PTR(-EOVERFLOW);
184 	}
185 	return req;
186 
187  out:
188 	fuse_drop_waiting(fc);
189 	return ERR_PTR(err);
190 }
191 
192 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
193 {
194 	return __fuse_get_req(fc, npages, false);
195 }
196 EXPORT_SYMBOL_GPL(fuse_get_req);
197 
198 struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
199 					     unsigned npages)
200 {
201 	return __fuse_get_req(fc, npages, true);
202 }
203 EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
204 
205 /*
206  * Return request in fuse_file->reserved_req.  However that may
207  * currently be in use.  If that is the case, wait for it to become
208  * available.
209  */
210 static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
211 					 struct file *file)
212 {
213 	struct fuse_req *req = NULL;
214 	struct fuse_file *ff = file->private_data;
215 
216 	do {
217 		wait_event(fc->reserved_req_waitq, ff->reserved_req);
218 		spin_lock(&fc->lock);
219 		if (ff->reserved_req) {
220 			req = ff->reserved_req;
221 			ff->reserved_req = NULL;
222 			req->stolen_file = get_file(file);
223 		}
224 		spin_unlock(&fc->lock);
225 	} while (!req);
226 
227 	return req;
228 }
229 
230 /*
231  * Put stolen request back into fuse_file->reserved_req
232  */
233 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
234 {
235 	struct file *file = req->stolen_file;
236 	struct fuse_file *ff = file->private_data;
237 
238 	spin_lock(&fc->lock);
239 	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
240 	BUG_ON(ff->reserved_req);
241 	ff->reserved_req = req;
242 	wake_up_all(&fc->reserved_req_waitq);
243 	spin_unlock(&fc->lock);
244 	fput(file);
245 }
246 
247 /*
248  * Gets a requests for a file operation, always succeeds
249  *
250  * This is used for sending the FLUSH request, which must get to
251  * userspace, due to POSIX locks which may need to be unlocked.
252  *
253  * If allocation fails due to OOM, use the reserved request in
254  * fuse_file.
255  *
256  * This is very unlikely to deadlock accidentally, since the
257  * filesystem should not have it's own file open.  If deadlock is
258  * intentional, it can still be broken by "aborting" the filesystem.
259  */
260 struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
261 					     struct file *file)
262 {
263 	struct fuse_req *req;
264 
265 	atomic_inc(&fc->num_waiting);
266 	wait_event(fc->blocked_waitq, fc->initialized);
267 	/* Matches smp_wmb() in fuse_set_initialized() */
268 	smp_rmb();
269 	req = fuse_request_alloc(0);
270 	if (!req)
271 		req = get_reserved_req(fc, file);
272 
273 	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
274 	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
275 	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
276 
277 	__set_bit(FR_WAITING, &req->flags);
278 	__clear_bit(FR_BACKGROUND, &req->flags);
279 	return req;
280 }
281 
282 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
283 {
284 	if (refcount_dec_and_test(&req->count)) {
285 		if (test_bit(FR_BACKGROUND, &req->flags)) {
286 			/*
287 			 * We get here in the unlikely case that a background
288 			 * request was allocated but not sent
289 			 */
290 			spin_lock(&fc->lock);
291 			if (!fc->blocked)
292 				wake_up(&fc->blocked_waitq);
293 			spin_unlock(&fc->lock);
294 		}
295 
296 		if (test_bit(FR_WAITING, &req->flags)) {
297 			__clear_bit(FR_WAITING, &req->flags);
298 			fuse_drop_waiting(fc);
299 		}
300 
301 		if (req->stolen_file)
302 			put_reserved_req(fc, req);
303 		else
304 			fuse_request_free(req);
305 	}
306 }
307 EXPORT_SYMBOL_GPL(fuse_put_request);
308 
309 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
310 {
311 	unsigned nbytes = 0;
312 	unsigned i;
313 
314 	for (i = 0; i < numargs; i++)
315 		nbytes += args[i].size;
316 
317 	return nbytes;
318 }
319 
320 static u64 fuse_get_unique(struct fuse_iqueue *fiq)
321 {
322 	return ++fiq->reqctr;
323 }
324 
325 static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
326 {
327 	req->in.h.len = sizeof(struct fuse_in_header) +
328 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
329 	list_add_tail(&req->list, &fiq->pending);
330 	wake_up_locked(&fiq->waitq);
331 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
332 }
333 
334 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
335 		       u64 nodeid, u64 nlookup)
336 {
337 	struct fuse_iqueue *fiq = &fc->iq;
338 
339 	forget->forget_one.nodeid = nodeid;
340 	forget->forget_one.nlookup = nlookup;
341 
342 	spin_lock(&fiq->waitq.lock);
343 	if (fiq->connected) {
344 		fiq->forget_list_tail->next = forget;
345 		fiq->forget_list_tail = forget;
346 		wake_up_locked(&fiq->waitq);
347 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
348 	} else {
349 		kfree(forget);
350 	}
351 	spin_unlock(&fiq->waitq.lock);
352 }
353 
354 static void flush_bg_queue(struct fuse_conn *fc)
355 {
356 	while (fc->active_background < fc->max_background &&
357 	       !list_empty(&fc->bg_queue)) {
358 		struct fuse_req *req;
359 		struct fuse_iqueue *fiq = &fc->iq;
360 
361 		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
362 		list_del(&req->list);
363 		fc->active_background++;
364 		spin_lock(&fiq->waitq.lock);
365 		req->in.h.unique = fuse_get_unique(fiq);
366 		queue_request(fiq, req);
367 		spin_unlock(&fiq->waitq.lock);
368 	}
369 }
370 
371 /*
372  * This function is called when a request is finished.  Either a reply
373  * has arrived or it was aborted (and not yet sent) or some error
374  * occurred during communication with userspace, or the device file
375  * was closed.  The requester thread is woken up (if still waiting),
376  * the 'end' callback is called if given, else the reference to the
377  * request is released
378  */
379 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
380 {
381 	struct fuse_iqueue *fiq = &fc->iq;
382 
383 	if (test_and_set_bit(FR_FINISHED, &req->flags))
384 		goto put_request;
385 
386 	spin_lock(&fiq->waitq.lock);
387 	list_del_init(&req->intr_entry);
388 	spin_unlock(&fiq->waitq.lock);
389 	WARN_ON(test_bit(FR_PENDING, &req->flags));
390 	WARN_ON(test_bit(FR_SENT, &req->flags));
391 	if (test_bit(FR_BACKGROUND, &req->flags)) {
392 		spin_lock(&fc->lock);
393 		clear_bit(FR_BACKGROUND, &req->flags);
394 		if (fc->num_background == fc->max_background)
395 			fc->blocked = 0;
396 
397 		/* Wake up next waiter, if any */
398 		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
399 			wake_up(&fc->blocked_waitq);
400 
401 		if (fc->num_background == fc->congestion_threshold && fc->sb) {
402 			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
403 			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
404 		}
405 		fc->num_background--;
406 		fc->active_background--;
407 		flush_bg_queue(fc);
408 		spin_unlock(&fc->lock);
409 	}
410 	wake_up(&req->waitq);
411 	if (req->end)
412 		req->end(fc, req);
413 put_request:
414 	fuse_put_request(fc, req);
415 }
416 
417 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
418 {
419 	spin_lock(&fiq->waitq.lock);
420 	if (test_bit(FR_FINISHED, &req->flags)) {
421 		spin_unlock(&fiq->waitq.lock);
422 		return;
423 	}
424 	if (list_empty(&req->intr_entry)) {
425 		list_add_tail(&req->intr_entry, &fiq->interrupts);
426 		wake_up_locked(&fiq->waitq);
427 	}
428 	spin_unlock(&fiq->waitq.lock);
429 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
430 }
431 
432 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
433 {
434 	struct fuse_iqueue *fiq = &fc->iq;
435 	int err;
436 
437 	if (!fc->no_interrupt) {
438 		/* Any signal may interrupt this */
439 		err = wait_event_interruptible(req->waitq,
440 					test_bit(FR_FINISHED, &req->flags));
441 		if (!err)
442 			return;
443 
444 		set_bit(FR_INTERRUPTED, &req->flags);
445 		/* matches barrier in fuse_dev_do_read() */
446 		smp_mb__after_atomic();
447 		if (test_bit(FR_SENT, &req->flags))
448 			queue_interrupt(fiq, req);
449 	}
450 
451 	if (!test_bit(FR_FORCE, &req->flags)) {
452 		/* Only fatal signals may interrupt this */
453 		err = wait_event_killable(req->waitq,
454 					test_bit(FR_FINISHED, &req->flags));
455 		if (!err)
456 			return;
457 
458 		spin_lock(&fiq->waitq.lock);
459 		/* Request is not yet in userspace, bail out */
460 		if (test_bit(FR_PENDING, &req->flags)) {
461 			list_del(&req->list);
462 			spin_unlock(&fiq->waitq.lock);
463 			__fuse_put_request(req);
464 			req->out.h.error = -EINTR;
465 			return;
466 		}
467 		spin_unlock(&fiq->waitq.lock);
468 	}
469 
470 	/*
471 	 * Either request is already in userspace, or it was forced.
472 	 * Wait it out.
473 	 */
474 	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
475 }
476 
477 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
478 {
479 	struct fuse_iqueue *fiq = &fc->iq;
480 
481 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
482 	spin_lock(&fiq->waitq.lock);
483 	if (!fiq->connected) {
484 		spin_unlock(&fiq->waitq.lock);
485 		req->out.h.error = -ENOTCONN;
486 	} else {
487 		req->in.h.unique = fuse_get_unique(fiq);
488 		queue_request(fiq, req);
489 		/* acquire extra reference, since request is still needed
490 		   after request_end() */
491 		__fuse_get_request(req);
492 		spin_unlock(&fiq->waitq.lock);
493 
494 		request_wait_answer(fc, req);
495 		/* Pairs with smp_wmb() in request_end() */
496 		smp_rmb();
497 	}
498 }
499 
500 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
501 {
502 	__set_bit(FR_ISREPLY, &req->flags);
503 	if (!test_bit(FR_WAITING, &req->flags)) {
504 		__set_bit(FR_WAITING, &req->flags);
505 		atomic_inc(&fc->num_waiting);
506 	}
507 	__fuse_request_send(fc, req);
508 }
509 EXPORT_SYMBOL_GPL(fuse_request_send);
510 
511 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
512 {
513 	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
514 		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
515 
516 	if (fc->minor < 9) {
517 		switch (args->in.h.opcode) {
518 		case FUSE_LOOKUP:
519 		case FUSE_CREATE:
520 		case FUSE_MKNOD:
521 		case FUSE_MKDIR:
522 		case FUSE_SYMLINK:
523 		case FUSE_LINK:
524 			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
525 			break;
526 		case FUSE_GETATTR:
527 		case FUSE_SETATTR:
528 			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
529 			break;
530 		}
531 	}
532 	if (fc->minor < 12) {
533 		switch (args->in.h.opcode) {
534 		case FUSE_CREATE:
535 			args->in.args[0].size = sizeof(struct fuse_open_in);
536 			break;
537 		case FUSE_MKNOD:
538 			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
539 			break;
540 		}
541 	}
542 }
543 
544 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
545 {
546 	struct fuse_req *req;
547 	ssize_t ret;
548 
549 	req = fuse_get_req(fc, 0);
550 	if (IS_ERR(req))
551 		return PTR_ERR(req);
552 
553 	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
554 	fuse_adjust_compat(fc, args);
555 
556 	req->in.h.opcode = args->in.h.opcode;
557 	req->in.h.nodeid = args->in.h.nodeid;
558 	req->in.numargs = args->in.numargs;
559 	memcpy(req->in.args, args->in.args,
560 	       args->in.numargs * sizeof(struct fuse_in_arg));
561 	req->out.argvar = args->out.argvar;
562 	req->out.numargs = args->out.numargs;
563 	memcpy(req->out.args, args->out.args,
564 	       args->out.numargs * sizeof(struct fuse_arg));
565 	fuse_request_send(fc, req);
566 	ret = req->out.h.error;
567 	if (!ret && args->out.argvar) {
568 		BUG_ON(args->out.numargs != 1);
569 		ret = req->out.args[0].size;
570 	}
571 	fuse_put_request(fc, req);
572 
573 	return ret;
574 }
575 
576 /*
577  * Called under fc->lock
578  *
579  * fc->connected must have been checked previously
580  */
581 void fuse_request_send_background_locked(struct fuse_conn *fc,
582 					 struct fuse_req *req)
583 {
584 	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
585 	if (!test_bit(FR_WAITING, &req->flags)) {
586 		__set_bit(FR_WAITING, &req->flags);
587 		atomic_inc(&fc->num_waiting);
588 	}
589 	__set_bit(FR_ISREPLY, &req->flags);
590 	fc->num_background++;
591 	if (fc->num_background == fc->max_background)
592 		fc->blocked = 1;
593 	if (fc->num_background == fc->congestion_threshold && fc->sb) {
594 		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
595 		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
596 	}
597 	list_add_tail(&req->list, &fc->bg_queue);
598 	flush_bg_queue(fc);
599 }
600 
601 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
602 {
603 	BUG_ON(!req->end);
604 	spin_lock(&fc->lock);
605 	if (fc->connected) {
606 		fuse_request_send_background_locked(fc, req);
607 		spin_unlock(&fc->lock);
608 	} else {
609 		spin_unlock(&fc->lock);
610 		req->out.h.error = -ENOTCONN;
611 		req->end(fc, req);
612 		fuse_put_request(fc, req);
613 	}
614 }
615 EXPORT_SYMBOL_GPL(fuse_request_send_background);
616 
617 static int fuse_request_send_notify_reply(struct fuse_conn *fc,
618 					  struct fuse_req *req, u64 unique)
619 {
620 	int err = -ENODEV;
621 	struct fuse_iqueue *fiq = &fc->iq;
622 
623 	__clear_bit(FR_ISREPLY, &req->flags);
624 	req->in.h.unique = unique;
625 	spin_lock(&fiq->waitq.lock);
626 	if (fiq->connected) {
627 		queue_request(fiq, req);
628 		err = 0;
629 	}
630 	spin_unlock(&fiq->waitq.lock);
631 
632 	return err;
633 }
634 
635 void fuse_force_forget(struct file *file, u64 nodeid)
636 {
637 	struct inode *inode = file_inode(file);
638 	struct fuse_conn *fc = get_fuse_conn(inode);
639 	struct fuse_req *req;
640 	struct fuse_forget_in inarg;
641 
642 	memset(&inarg, 0, sizeof(inarg));
643 	inarg.nlookup = 1;
644 	req = fuse_get_req_nofail_nopages(fc, file);
645 	req->in.h.opcode = FUSE_FORGET;
646 	req->in.h.nodeid = nodeid;
647 	req->in.numargs = 1;
648 	req->in.args[0].size = sizeof(inarg);
649 	req->in.args[0].value = &inarg;
650 	__clear_bit(FR_ISREPLY, &req->flags);
651 	__fuse_request_send(fc, req);
652 	/* ignore errors */
653 	fuse_put_request(fc, req);
654 }
655 
656 /*
657  * Lock the request.  Up to the next unlock_request() there mustn't be
658  * anything that could cause a page-fault.  If the request was already
659  * aborted bail out.
660  */
661 static int lock_request(struct fuse_req *req)
662 {
663 	int err = 0;
664 	if (req) {
665 		spin_lock(&req->waitq.lock);
666 		if (test_bit(FR_ABORTED, &req->flags))
667 			err = -ENOENT;
668 		else
669 			set_bit(FR_LOCKED, &req->flags);
670 		spin_unlock(&req->waitq.lock);
671 	}
672 	return err;
673 }
674 
675 /*
676  * Unlock request.  If it was aborted while locked, caller is responsible
677  * for unlocking and ending the request.
678  */
679 static int unlock_request(struct fuse_req *req)
680 {
681 	int err = 0;
682 	if (req) {
683 		spin_lock(&req->waitq.lock);
684 		if (test_bit(FR_ABORTED, &req->flags))
685 			err = -ENOENT;
686 		else
687 			clear_bit(FR_LOCKED, &req->flags);
688 		spin_unlock(&req->waitq.lock);
689 	}
690 	return err;
691 }
692 
693 struct fuse_copy_state {
694 	int write;
695 	struct fuse_req *req;
696 	struct iov_iter *iter;
697 	struct pipe_buffer *pipebufs;
698 	struct pipe_buffer *currbuf;
699 	struct pipe_inode_info *pipe;
700 	unsigned long nr_segs;
701 	struct page *pg;
702 	unsigned len;
703 	unsigned offset;
704 	unsigned move_pages:1;
705 };
706 
707 static void fuse_copy_init(struct fuse_copy_state *cs, int write,
708 			   struct iov_iter *iter)
709 {
710 	memset(cs, 0, sizeof(*cs));
711 	cs->write = write;
712 	cs->iter = iter;
713 }
714 
715 /* Unmap and put previous page of userspace buffer */
716 static void fuse_copy_finish(struct fuse_copy_state *cs)
717 {
718 	if (cs->currbuf) {
719 		struct pipe_buffer *buf = cs->currbuf;
720 
721 		if (cs->write)
722 			buf->len = PAGE_SIZE - cs->len;
723 		cs->currbuf = NULL;
724 	} else if (cs->pg) {
725 		if (cs->write) {
726 			flush_dcache_page(cs->pg);
727 			set_page_dirty_lock(cs->pg);
728 		}
729 		put_page(cs->pg);
730 	}
731 	cs->pg = NULL;
732 }
733 
734 /*
735  * Get another pagefull of userspace buffer, and map it to kernel
736  * address space, and lock request
737  */
738 static int fuse_copy_fill(struct fuse_copy_state *cs)
739 {
740 	struct page *page;
741 	int err;
742 
743 	err = unlock_request(cs->req);
744 	if (err)
745 		return err;
746 
747 	fuse_copy_finish(cs);
748 	if (cs->pipebufs) {
749 		struct pipe_buffer *buf = cs->pipebufs;
750 
751 		if (!cs->write) {
752 			err = pipe_buf_confirm(cs->pipe, buf);
753 			if (err)
754 				return err;
755 
756 			BUG_ON(!cs->nr_segs);
757 			cs->currbuf = buf;
758 			cs->pg = buf->page;
759 			cs->offset = buf->offset;
760 			cs->len = buf->len;
761 			cs->pipebufs++;
762 			cs->nr_segs--;
763 		} else {
764 			if (cs->nr_segs == cs->pipe->buffers)
765 				return -EIO;
766 
767 			page = alloc_page(GFP_HIGHUSER);
768 			if (!page)
769 				return -ENOMEM;
770 
771 			buf->page = page;
772 			buf->offset = 0;
773 			buf->len = 0;
774 
775 			cs->currbuf = buf;
776 			cs->pg = page;
777 			cs->offset = 0;
778 			cs->len = PAGE_SIZE;
779 			cs->pipebufs++;
780 			cs->nr_segs++;
781 		}
782 	} else {
783 		size_t off;
784 		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
785 		if (err < 0)
786 			return err;
787 		BUG_ON(!err);
788 		cs->len = err;
789 		cs->offset = off;
790 		cs->pg = page;
791 		iov_iter_advance(cs->iter, err);
792 	}
793 
794 	return lock_request(cs->req);
795 }
796 
797 /* Do as much copy to/from userspace buffer as we can */
798 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
799 {
800 	unsigned ncpy = min(*size, cs->len);
801 	if (val) {
802 		void *pgaddr = kmap_atomic(cs->pg);
803 		void *buf = pgaddr + cs->offset;
804 
805 		if (cs->write)
806 			memcpy(buf, *val, ncpy);
807 		else
808 			memcpy(*val, buf, ncpy);
809 
810 		kunmap_atomic(pgaddr);
811 		*val += ncpy;
812 	}
813 	*size -= ncpy;
814 	cs->len -= ncpy;
815 	cs->offset += ncpy;
816 	return ncpy;
817 }
818 
819 static int fuse_check_page(struct page *page)
820 {
821 	if (page_mapcount(page) ||
822 	    page->mapping != NULL ||
823 	    page_count(page) != 1 ||
824 	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
825 	     ~(1 << PG_locked |
826 	       1 << PG_referenced |
827 	       1 << PG_uptodate |
828 	       1 << PG_lru |
829 	       1 << PG_active |
830 	       1 << PG_reclaim))) {
831 		printk(KERN_WARNING "fuse: trying to steal weird page\n");
832 		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
833 		return 1;
834 	}
835 	return 0;
836 }
837 
838 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
839 {
840 	int err;
841 	struct page *oldpage = *pagep;
842 	struct page *newpage;
843 	struct pipe_buffer *buf = cs->pipebufs;
844 
845 	err = unlock_request(cs->req);
846 	if (err)
847 		return err;
848 
849 	fuse_copy_finish(cs);
850 
851 	err = pipe_buf_confirm(cs->pipe, buf);
852 	if (err)
853 		return err;
854 
855 	BUG_ON(!cs->nr_segs);
856 	cs->currbuf = buf;
857 	cs->len = buf->len;
858 	cs->pipebufs++;
859 	cs->nr_segs--;
860 
861 	if (cs->len != PAGE_SIZE)
862 		goto out_fallback;
863 
864 	if (pipe_buf_steal(cs->pipe, buf) != 0)
865 		goto out_fallback;
866 
867 	newpage = buf->page;
868 
869 	if (!PageUptodate(newpage))
870 		SetPageUptodate(newpage);
871 
872 	ClearPageMappedToDisk(newpage);
873 
874 	if (fuse_check_page(newpage) != 0)
875 		goto out_fallback_unlock;
876 
877 	/*
878 	 * This is a new and locked page, it shouldn't be mapped or
879 	 * have any special flags on it
880 	 */
881 	if (WARN_ON(page_mapped(oldpage)))
882 		goto out_fallback_unlock;
883 	if (WARN_ON(page_has_private(oldpage)))
884 		goto out_fallback_unlock;
885 	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
886 		goto out_fallback_unlock;
887 	if (WARN_ON(PageMlocked(oldpage)))
888 		goto out_fallback_unlock;
889 
890 	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
891 	if (err) {
892 		unlock_page(newpage);
893 		return err;
894 	}
895 
896 	get_page(newpage);
897 
898 	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
899 		lru_cache_add_file(newpage);
900 
901 	err = 0;
902 	spin_lock(&cs->req->waitq.lock);
903 	if (test_bit(FR_ABORTED, &cs->req->flags))
904 		err = -ENOENT;
905 	else
906 		*pagep = newpage;
907 	spin_unlock(&cs->req->waitq.lock);
908 
909 	if (err) {
910 		unlock_page(newpage);
911 		put_page(newpage);
912 		return err;
913 	}
914 
915 	unlock_page(oldpage);
916 	put_page(oldpage);
917 	cs->len = 0;
918 
919 	return 0;
920 
921 out_fallback_unlock:
922 	unlock_page(newpage);
923 out_fallback:
924 	cs->pg = buf->page;
925 	cs->offset = buf->offset;
926 
927 	err = lock_request(cs->req);
928 	if (err)
929 		return err;
930 
931 	return 1;
932 }
933 
934 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
935 			 unsigned offset, unsigned count)
936 {
937 	struct pipe_buffer *buf;
938 	int err;
939 
940 	if (cs->nr_segs == cs->pipe->buffers)
941 		return -EIO;
942 
943 	err = unlock_request(cs->req);
944 	if (err)
945 		return err;
946 
947 	fuse_copy_finish(cs);
948 
949 	buf = cs->pipebufs;
950 	get_page(page);
951 	buf->page = page;
952 	buf->offset = offset;
953 	buf->len = count;
954 
955 	cs->pipebufs++;
956 	cs->nr_segs++;
957 	cs->len = 0;
958 
959 	return 0;
960 }
961 
962 /*
963  * Copy a page in the request to/from the userspace buffer.  Must be
964  * done atomically
965  */
966 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
967 			  unsigned offset, unsigned count, int zeroing)
968 {
969 	int err;
970 	struct page *page = *pagep;
971 
972 	if (page && zeroing && count < PAGE_SIZE)
973 		clear_highpage(page);
974 
975 	while (count) {
976 		if (cs->write && cs->pipebufs && page) {
977 			return fuse_ref_page(cs, page, offset, count);
978 		} else if (!cs->len) {
979 			if (cs->move_pages && page &&
980 			    offset == 0 && count == PAGE_SIZE) {
981 				err = fuse_try_move_page(cs, pagep);
982 				if (err <= 0)
983 					return err;
984 			} else {
985 				err = fuse_copy_fill(cs);
986 				if (err)
987 					return err;
988 			}
989 		}
990 		if (page) {
991 			void *mapaddr = kmap_atomic(page);
992 			void *buf = mapaddr + offset;
993 			offset += fuse_copy_do(cs, &buf, &count);
994 			kunmap_atomic(mapaddr);
995 		} else
996 			offset += fuse_copy_do(cs, NULL, &count);
997 	}
998 	if (page && !cs->write)
999 		flush_dcache_page(page);
1000 	return 0;
1001 }
1002 
1003 /* Copy pages in the request to/from userspace buffer */
1004 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
1005 			   int zeroing)
1006 {
1007 	unsigned i;
1008 	struct fuse_req *req = cs->req;
1009 
1010 	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
1011 		int err;
1012 		unsigned offset = req->page_descs[i].offset;
1013 		unsigned count = min(nbytes, req->page_descs[i].length);
1014 
1015 		err = fuse_copy_page(cs, &req->pages[i], offset, count,
1016 				     zeroing);
1017 		if (err)
1018 			return err;
1019 
1020 		nbytes -= count;
1021 	}
1022 	return 0;
1023 }
1024 
1025 /* Copy a single argument in the request to/from userspace buffer */
1026 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1027 {
1028 	while (size) {
1029 		if (!cs->len) {
1030 			int err = fuse_copy_fill(cs);
1031 			if (err)
1032 				return err;
1033 		}
1034 		fuse_copy_do(cs, &val, &size);
1035 	}
1036 	return 0;
1037 }
1038 
1039 /* Copy request arguments to/from userspace buffer */
1040 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1041 			  unsigned argpages, struct fuse_arg *args,
1042 			  int zeroing)
1043 {
1044 	int err = 0;
1045 	unsigned i;
1046 
1047 	for (i = 0; !err && i < numargs; i++)  {
1048 		struct fuse_arg *arg = &args[i];
1049 		if (i == numargs - 1 && argpages)
1050 			err = fuse_copy_pages(cs, arg->size, zeroing);
1051 		else
1052 			err = fuse_copy_one(cs, arg->value, arg->size);
1053 	}
1054 	return err;
1055 }
1056 
1057 static int forget_pending(struct fuse_iqueue *fiq)
1058 {
1059 	return fiq->forget_list_head.next != NULL;
1060 }
1061 
1062 static int request_pending(struct fuse_iqueue *fiq)
1063 {
1064 	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
1065 		forget_pending(fiq);
1066 }
1067 
1068 /*
1069  * Transfer an interrupt request to userspace
1070  *
1071  * Unlike other requests this is assembled on demand, without a need
1072  * to allocate a separate fuse_req structure.
1073  *
1074  * Called with fiq->waitq.lock held, releases it
1075  */
1076 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
1077 			       struct fuse_copy_state *cs,
1078 			       size_t nbytes, struct fuse_req *req)
1079 __releases(fiq->waitq.lock)
1080 {
1081 	struct fuse_in_header ih;
1082 	struct fuse_interrupt_in arg;
1083 	unsigned reqsize = sizeof(ih) + sizeof(arg);
1084 	int err;
1085 
1086 	list_del_init(&req->intr_entry);
1087 	req->intr_unique = fuse_get_unique(fiq);
1088 	memset(&ih, 0, sizeof(ih));
1089 	memset(&arg, 0, sizeof(arg));
1090 	ih.len = reqsize;
1091 	ih.opcode = FUSE_INTERRUPT;
1092 	ih.unique = req->intr_unique;
1093 	arg.unique = req->in.h.unique;
1094 
1095 	spin_unlock(&fiq->waitq.lock);
1096 	if (nbytes < reqsize)
1097 		return -EINVAL;
1098 
1099 	err = fuse_copy_one(cs, &ih, sizeof(ih));
1100 	if (!err)
1101 		err = fuse_copy_one(cs, &arg, sizeof(arg));
1102 	fuse_copy_finish(cs);
1103 
1104 	return err ? err : reqsize;
1105 }
1106 
1107 static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
1108 					       unsigned max,
1109 					       unsigned *countp)
1110 {
1111 	struct fuse_forget_link *head = fiq->forget_list_head.next;
1112 	struct fuse_forget_link **newhead = &head;
1113 	unsigned count;
1114 
1115 	for (count = 0; *newhead != NULL && count < max; count++)
1116 		newhead = &(*newhead)->next;
1117 
1118 	fiq->forget_list_head.next = *newhead;
1119 	*newhead = NULL;
1120 	if (fiq->forget_list_head.next == NULL)
1121 		fiq->forget_list_tail = &fiq->forget_list_head;
1122 
1123 	if (countp != NULL)
1124 		*countp = count;
1125 
1126 	return head;
1127 }
1128 
1129 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
1130 				   struct fuse_copy_state *cs,
1131 				   size_t nbytes)
1132 __releases(fiq->waitq.lock)
1133 {
1134 	int err;
1135 	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
1136 	struct fuse_forget_in arg = {
1137 		.nlookup = forget->forget_one.nlookup,
1138 	};
1139 	struct fuse_in_header ih = {
1140 		.opcode = FUSE_FORGET,
1141 		.nodeid = forget->forget_one.nodeid,
1142 		.unique = fuse_get_unique(fiq),
1143 		.len = sizeof(ih) + sizeof(arg),
1144 	};
1145 
1146 	spin_unlock(&fiq->waitq.lock);
1147 	kfree(forget);
1148 	if (nbytes < ih.len)
1149 		return -EINVAL;
1150 
1151 	err = fuse_copy_one(cs, &ih, sizeof(ih));
1152 	if (!err)
1153 		err = fuse_copy_one(cs, &arg, sizeof(arg));
1154 	fuse_copy_finish(cs);
1155 
1156 	if (err)
1157 		return err;
1158 
1159 	return ih.len;
1160 }
1161 
1162 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
1163 				   struct fuse_copy_state *cs, size_t nbytes)
1164 __releases(fiq->waitq.lock)
1165 {
1166 	int err;
1167 	unsigned max_forgets;
1168 	unsigned count;
1169 	struct fuse_forget_link *head;
1170 	struct fuse_batch_forget_in arg = { .count = 0 };
1171 	struct fuse_in_header ih = {
1172 		.opcode = FUSE_BATCH_FORGET,
1173 		.unique = fuse_get_unique(fiq),
1174 		.len = sizeof(ih) + sizeof(arg),
1175 	};
1176 
1177 	if (nbytes < ih.len) {
1178 		spin_unlock(&fiq->waitq.lock);
1179 		return -EINVAL;
1180 	}
1181 
1182 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1183 	head = dequeue_forget(fiq, max_forgets, &count);
1184 	spin_unlock(&fiq->waitq.lock);
1185 
1186 	arg.count = count;
1187 	ih.len += count * sizeof(struct fuse_forget_one);
1188 	err = fuse_copy_one(cs, &ih, sizeof(ih));
1189 	if (!err)
1190 		err = fuse_copy_one(cs, &arg, sizeof(arg));
1191 
1192 	while (head) {
1193 		struct fuse_forget_link *forget = head;
1194 
1195 		if (!err) {
1196 			err = fuse_copy_one(cs, &forget->forget_one,
1197 					    sizeof(forget->forget_one));
1198 		}
1199 		head = forget->next;
1200 		kfree(forget);
1201 	}
1202 
1203 	fuse_copy_finish(cs);
1204 
1205 	if (err)
1206 		return err;
1207 
1208 	return ih.len;
1209 }
1210 
1211 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
1212 			    struct fuse_copy_state *cs,
1213 			    size_t nbytes)
1214 __releases(fiq->waitq.lock)
1215 {
1216 	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
1217 		return fuse_read_single_forget(fiq, cs, nbytes);
1218 	else
1219 		return fuse_read_batch_forget(fiq, cs, nbytes);
1220 }
1221 
1222 /*
1223  * Read a single request into the userspace filesystem's buffer.  This
1224  * function waits until a request is available, then removes it from
1225  * the pending list and copies request data to userspace buffer.  If
1226  * no reply is needed (FORGET) or request has been aborted or there
1227  * was an error during the copying then it's finished by calling
1228  * request_end().  Otherwise add it to the processing list, and set
1229  * the 'sent' flag.
1230  */
1231 static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
1232 				struct fuse_copy_state *cs, size_t nbytes)
1233 {
1234 	ssize_t err;
1235 	struct fuse_conn *fc = fud->fc;
1236 	struct fuse_iqueue *fiq = &fc->iq;
1237 	struct fuse_pqueue *fpq = &fud->pq;
1238 	struct fuse_req *req;
1239 	struct fuse_in *in;
1240 	unsigned reqsize;
1241 
1242  restart:
1243 	spin_lock(&fiq->waitq.lock);
1244 	err = -EAGAIN;
1245 	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
1246 	    !request_pending(fiq))
1247 		goto err_unlock;
1248 
1249 	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
1250 				!fiq->connected || request_pending(fiq));
1251 	if (err)
1252 		goto err_unlock;
1253 
1254 	if (!fiq->connected) {
1255 		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
1256 		goto err_unlock;
1257 	}
1258 
1259 	if (!list_empty(&fiq->interrupts)) {
1260 		req = list_entry(fiq->interrupts.next, struct fuse_req,
1261 				 intr_entry);
1262 		return fuse_read_interrupt(fiq, cs, nbytes, req);
1263 	}
1264 
1265 	if (forget_pending(fiq)) {
1266 		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
1267 			return fuse_read_forget(fc, fiq, cs, nbytes);
1268 
1269 		if (fiq->forget_batch <= -8)
1270 			fiq->forget_batch = 16;
1271 	}
1272 
1273 	req = list_entry(fiq->pending.next, struct fuse_req, list);
1274 	clear_bit(FR_PENDING, &req->flags);
1275 	list_del_init(&req->list);
1276 	spin_unlock(&fiq->waitq.lock);
1277 
1278 	in = &req->in;
1279 	reqsize = in->h.len;
1280 
1281 	/* If request is too large, reply with an error and restart the read */
1282 	if (nbytes < reqsize) {
1283 		req->out.h.error = -EIO;
1284 		/* SETXATTR is special, since it may contain too large data */
1285 		if (in->h.opcode == FUSE_SETXATTR)
1286 			req->out.h.error = -E2BIG;
1287 		request_end(fc, req);
1288 		goto restart;
1289 	}
1290 	spin_lock(&fpq->lock);
1291 	list_add(&req->list, &fpq->io);
1292 	spin_unlock(&fpq->lock);
1293 	cs->req = req;
1294 	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1295 	if (!err)
1296 		err = fuse_copy_args(cs, in->numargs, in->argpages,
1297 				     (struct fuse_arg *) in->args, 0);
1298 	fuse_copy_finish(cs);
1299 	spin_lock(&fpq->lock);
1300 	clear_bit(FR_LOCKED, &req->flags);
1301 	if (!fpq->connected) {
1302 		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
1303 		goto out_end;
1304 	}
1305 	if (err) {
1306 		req->out.h.error = -EIO;
1307 		goto out_end;
1308 	}
1309 	if (!test_bit(FR_ISREPLY, &req->flags)) {
1310 		err = reqsize;
1311 		goto out_end;
1312 	}
1313 	list_move_tail(&req->list, &fpq->processing);
1314 	spin_unlock(&fpq->lock);
1315 	set_bit(FR_SENT, &req->flags);
1316 	/* matches barrier in request_wait_answer() */
1317 	smp_mb__after_atomic();
1318 	if (test_bit(FR_INTERRUPTED, &req->flags))
1319 		queue_interrupt(fiq, req);
1320 
1321 	return reqsize;
1322 
1323 out_end:
1324 	if (!test_bit(FR_PRIVATE, &req->flags))
1325 		list_del_init(&req->list);
1326 	spin_unlock(&fpq->lock);
1327 	request_end(fc, req);
1328 	return err;
1329 
1330  err_unlock:
1331 	spin_unlock(&fiq->waitq.lock);
1332 	return err;
1333 }
1334 
1335 static int fuse_dev_open(struct inode *inode, struct file *file)
1336 {
1337 	/*
1338 	 * The fuse device's file's private_data is used to hold
1339 	 * the fuse_conn(ection) when it is mounted, and is used to
1340 	 * keep track of whether the file has been mounted already.
1341 	 */
1342 	file->private_data = NULL;
1343 	return 0;
1344 }
1345 
1346 static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
1347 {
1348 	struct fuse_copy_state cs;
1349 	struct file *file = iocb->ki_filp;
1350 	struct fuse_dev *fud = fuse_get_dev(file);
1351 
1352 	if (!fud)
1353 		return -EPERM;
1354 
1355 	if (!iter_is_iovec(to))
1356 		return -EINVAL;
1357 
1358 	fuse_copy_init(&cs, 1, to);
1359 
1360 	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
1361 }
1362 
1363 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1364 				    struct pipe_inode_info *pipe,
1365 				    size_t len, unsigned int flags)
1366 {
1367 	int total, ret;
1368 	int page_nr = 0;
1369 	struct pipe_buffer *bufs;
1370 	struct fuse_copy_state cs;
1371 	struct fuse_dev *fud = fuse_get_dev(in);
1372 
1373 	if (!fud)
1374 		return -EPERM;
1375 
1376 	bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
1377 			      GFP_KERNEL);
1378 	if (!bufs)
1379 		return -ENOMEM;
1380 
1381 	fuse_copy_init(&cs, 1, NULL);
1382 	cs.pipebufs = bufs;
1383 	cs.pipe = pipe;
1384 	ret = fuse_dev_do_read(fud, in, &cs, len);
1385 	if (ret < 0)
1386 		goto out;
1387 
1388 	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1389 		ret = -EIO;
1390 		goto out;
1391 	}
1392 
1393 	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
1394 		/*
1395 		 * Need to be careful about this.  Having buf->ops in module
1396 		 * code can Oops if the buffer persists after module unload.
1397 		 */
1398 		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
1399 		bufs[page_nr].flags = 0;
1400 		ret = add_to_pipe(pipe, &bufs[page_nr++]);
1401 		if (unlikely(ret < 0))
1402 			break;
1403 	}
1404 	if (total)
1405 		ret = total;
1406 out:
1407 	for (; page_nr < cs.nr_segs; page_nr++)
1408 		put_page(bufs[page_nr].page);
1409 
1410 	kvfree(bufs);
1411 	return ret;
1412 }
1413 
1414 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1415 			    struct fuse_copy_state *cs)
1416 {
1417 	struct fuse_notify_poll_wakeup_out outarg;
1418 	int err = -EINVAL;
1419 
1420 	if (size != sizeof(outarg))
1421 		goto err;
1422 
1423 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1424 	if (err)
1425 		goto err;
1426 
1427 	fuse_copy_finish(cs);
1428 	return fuse_notify_poll_wakeup(fc, &outarg);
1429 
1430 err:
1431 	fuse_copy_finish(cs);
1432 	return err;
1433 }
1434 
1435 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1436 				   struct fuse_copy_state *cs)
1437 {
1438 	struct fuse_notify_inval_inode_out outarg;
1439 	int err = -EINVAL;
1440 
1441 	if (size != sizeof(outarg))
1442 		goto err;
1443 
1444 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1445 	if (err)
1446 		goto err;
1447 	fuse_copy_finish(cs);
1448 
1449 	down_read(&fc->killsb);
1450 	err = -ENOENT;
1451 	if (fc->sb) {
1452 		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1453 					       outarg.off, outarg.len);
1454 	}
1455 	up_read(&fc->killsb);
1456 	return err;
1457 
1458 err:
1459 	fuse_copy_finish(cs);
1460 	return err;
1461 }
1462 
1463 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1464 				   struct fuse_copy_state *cs)
1465 {
1466 	struct fuse_notify_inval_entry_out outarg;
1467 	int err = -ENOMEM;
1468 	char *buf;
1469 	struct qstr name;
1470 
1471 	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1472 	if (!buf)
1473 		goto err;
1474 
1475 	err = -EINVAL;
1476 	if (size < sizeof(outarg))
1477 		goto err;
1478 
1479 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1480 	if (err)
1481 		goto err;
1482 
1483 	err = -ENAMETOOLONG;
1484 	if (outarg.namelen > FUSE_NAME_MAX)
1485 		goto err;
1486 
1487 	err = -EINVAL;
1488 	if (size != sizeof(outarg) + outarg.namelen + 1)
1489 		goto err;
1490 
1491 	name.name = buf;
1492 	name.len = outarg.namelen;
1493 	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1494 	if (err)
1495 		goto err;
1496 	fuse_copy_finish(cs);
1497 	buf[outarg.namelen] = 0;
1498 
1499 	down_read(&fc->killsb);
1500 	err = -ENOENT;
1501 	if (fc->sb)
1502 		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1503 	up_read(&fc->killsb);
1504 	kfree(buf);
1505 	return err;
1506 
1507 err:
1508 	kfree(buf);
1509 	fuse_copy_finish(cs);
1510 	return err;
1511 }
1512 
1513 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1514 			      struct fuse_copy_state *cs)
1515 {
1516 	struct fuse_notify_delete_out outarg;
1517 	int err = -ENOMEM;
1518 	char *buf;
1519 	struct qstr name;
1520 
1521 	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1522 	if (!buf)
1523 		goto err;
1524 
1525 	err = -EINVAL;
1526 	if (size < sizeof(outarg))
1527 		goto err;
1528 
1529 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1530 	if (err)
1531 		goto err;
1532 
1533 	err = -ENAMETOOLONG;
1534 	if (outarg.namelen > FUSE_NAME_MAX)
1535 		goto err;
1536 
1537 	err = -EINVAL;
1538 	if (size != sizeof(outarg) + outarg.namelen + 1)
1539 		goto err;
1540 
1541 	name.name = buf;
1542 	name.len = outarg.namelen;
1543 	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1544 	if (err)
1545 		goto err;
1546 	fuse_copy_finish(cs);
1547 	buf[outarg.namelen] = 0;
1548 
1549 	down_read(&fc->killsb);
1550 	err = -ENOENT;
1551 	if (fc->sb)
1552 		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1553 					       outarg.child, &name);
1554 	up_read(&fc->killsb);
1555 	kfree(buf);
1556 	return err;
1557 
1558 err:
1559 	kfree(buf);
1560 	fuse_copy_finish(cs);
1561 	return err;
1562 }
1563 
1564 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1565 			     struct fuse_copy_state *cs)
1566 {
1567 	struct fuse_notify_store_out outarg;
1568 	struct inode *inode;
1569 	struct address_space *mapping;
1570 	u64 nodeid;
1571 	int err;
1572 	pgoff_t index;
1573 	unsigned int offset;
1574 	unsigned int num;
1575 	loff_t file_size;
1576 	loff_t end;
1577 
1578 	err = -EINVAL;
1579 	if (size < sizeof(outarg))
1580 		goto out_finish;
1581 
1582 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1583 	if (err)
1584 		goto out_finish;
1585 
1586 	err = -EINVAL;
1587 	if (size - sizeof(outarg) != outarg.size)
1588 		goto out_finish;
1589 
1590 	nodeid = outarg.nodeid;
1591 
1592 	down_read(&fc->killsb);
1593 
1594 	err = -ENOENT;
1595 	if (!fc->sb)
1596 		goto out_up_killsb;
1597 
1598 	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1599 	if (!inode)
1600 		goto out_up_killsb;
1601 
1602 	mapping = inode->i_mapping;
1603 	index = outarg.offset >> PAGE_SHIFT;
1604 	offset = outarg.offset & ~PAGE_MASK;
1605 	file_size = i_size_read(inode);
1606 	end = outarg.offset + outarg.size;
1607 	if (end > file_size) {
1608 		file_size = end;
1609 		fuse_write_update_size(inode, file_size);
1610 	}
1611 
1612 	num = outarg.size;
1613 	while (num) {
1614 		struct page *page;
1615 		unsigned int this_num;
1616 
1617 		err = -ENOMEM;
1618 		page = find_or_create_page(mapping, index,
1619 					   mapping_gfp_mask(mapping));
1620 		if (!page)
1621 			goto out_iput;
1622 
1623 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1624 		err = fuse_copy_page(cs, &page, offset, this_num, 0);
1625 		if (!err && offset == 0 &&
1626 		    (this_num == PAGE_SIZE || file_size == end))
1627 			SetPageUptodate(page);
1628 		unlock_page(page);
1629 		put_page(page);
1630 
1631 		if (err)
1632 			goto out_iput;
1633 
1634 		num -= this_num;
1635 		offset = 0;
1636 		index++;
1637 	}
1638 
1639 	err = 0;
1640 
1641 out_iput:
1642 	iput(inode);
1643 out_up_killsb:
1644 	up_read(&fc->killsb);
1645 out_finish:
1646 	fuse_copy_finish(cs);
1647 	return err;
1648 }
1649 
1650 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1651 {
1652 	release_pages(req->pages, req->num_pages);
1653 }
1654 
1655 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1656 			 struct fuse_notify_retrieve_out *outarg)
1657 {
1658 	int err;
1659 	struct address_space *mapping = inode->i_mapping;
1660 	struct fuse_req *req;
1661 	pgoff_t index;
1662 	loff_t file_size;
1663 	unsigned int num;
1664 	unsigned int offset;
1665 	size_t total_len = 0;
1666 	int num_pages;
1667 
1668 	offset = outarg->offset & ~PAGE_MASK;
1669 	file_size = i_size_read(inode);
1670 
1671 	num = outarg->size;
1672 	if (outarg->offset > file_size)
1673 		num = 0;
1674 	else if (outarg->offset + num > file_size)
1675 		num = file_size - outarg->offset;
1676 
1677 	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1678 	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1679 
1680 	req = fuse_get_req(fc, num_pages);
1681 	if (IS_ERR(req))
1682 		return PTR_ERR(req);
1683 
1684 	req->in.h.opcode = FUSE_NOTIFY_REPLY;
1685 	req->in.h.nodeid = outarg->nodeid;
1686 	req->in.numargs = 2;
1687 	req->in.argpages = 1;
1688 	req->page_descs[0].offset = offset;
1689 	req->end = fuse_retrieve_end;
1690 
1691 	index = outarg->offset >> PAGE_SHIFT;
1692 
1693 	while (num && req->num_pages < num_pages) {
1694 		struct page *page;
1695 		unsigned int this_num;
1696 
1697 		page = find_get_page(mapping, index);
1698 		if (!page)
1699 			break;
1700 
1701 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1702 		req->pages[req->num_pages] = page;
1703 		req->page_descs[req->num_pages].length = this_num;
1704 		req->num_pages++;
1705 
1706 		offset = 0;
1707 		num -= this_num;
1708 		total_len += this_num;
1709 		index++;
1710 	}
1711 	req->misc.retrieve_in.offset = outarg->offset;
1712 	req->misc.retrieve_in.size = total_len;
1713 	req->in.args[0].size = sizeof(req->misc.retrieve_in);
1714 	req->in.args[0].value = &req->misc.retrieve_in;
1715 	req->in.args[1].size = total_len;
1716 
1717 	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1718 	if (err)
1719 		fuse_retrieve_end(fc, req);
1720 
1721 	return err;
1722 }
1723 
1724 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1725 				struct fuse_copy_state *cs)
1726 {
1727 	struct fuse_notify_retrieve_out outarg;
1728 	struct inode *inode;
1729 	int err;
1730 
1731 	err = -EINVAL;
1732 	if (size != sizeof(outarg))
1733 		goto copy_finish;
1734 
1735 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1736 	if (err)
1737 		goto copy_finish;
1738 
1739 	fuse_copy_finish(cs);
1740 
1741 	down_read(&fc->killsb);
1742 	err = -ENOENT;
1743 	if (fc->sb) {
1744 		u64 nodeid = outarg.nodeid;
1745 
1746 		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1747 		if (inode) {
1748 			err = fuse_retrieve(fc, inode, &outarg);
1749 			iput(inode);
1750 		}
1751 	}
1752 	up_read(&fc->killsb);
1753 
1754 	return err;
1755 
1756 copy_finish:
1757 	fuse_copy_finish(cs);
1758 	return err;
1759 }
1760 
1761 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1762 		       unsigned int size, struct fuse_copy_state *cs)
1763 {
1764 	/* Don't try to move pages (yet) */
1765 	cs->move_pages = 0;
1766 
1767 	switch (code) {
1768 	case FUSE_NOTIFY_POLL:
1769 		return fuse_notify_poll(fc, size, cs);
1770 
1771 	case FUSE_NOTIFY_INVAL_INODE:
1772 		return fuse_notify_inval_inode(fc, size, cs);
1773 
1774 	case FUSE_NOTIFY_INVAL_ENTRY:
1775 		return fuse_notify_inval_entry(fc, size, cs);
1776 
1777 	case FUSE_NOTIFY_STORE:
1778 		return fuse_notify_store(fc, size, cs);
1779 
1780 	case FUSE_NOTIFY_RETRIEVE:
1781 		return fuse_notify_retrieve(fc, size, cs);
1782 
1783 	case FUSE_NOTIFY_DELETE:
1784 		return fuse_notify_delete(fc, size, cs);
1785 
1786 	default:
1787 		fuse_copy_finish(cs);
1788 		return -EINVAL;
1789 	}
1790 }
1791 
1792 /* Look up request on processing list by unique ID */
1793 static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
1794 {
1795 	struct fuse_req *req;
1796 
1797 	list_for_each_entry(req, &fpq->processing, list) {
1798 		if (req->in.h.unique == unique || req->intr_unique == unique)
1799 			return req;
1800 	}
1801 	return NULL;
1802 }
1803 
1804 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1805 			 unsigned nbytes)
1806 {
1807 	unsigned reqsize = sizeof(struct fuse_out_header);
1808 
1809 	if (out->h.error)
1810 		return nbytes != reqsize ? -EINVAL : 0;
1811 
1812 	reqsize += len_args(out->numargs, out->args);
1813 
1814 	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1815 		return -EINVAL;
1816 	else if (reqsize > nbytes) {
1817 		struct fuse_arg *lastarg = &out->args[out->numargs-1];
1818 		unsigned diffsize = reqsize - nbytes;
1819 		if (diffsize > lastarg->size)
1820 			return -EINVAL;
1821 		lastarg->size -= diffsize;
1822 	}
1823 	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1824 			      out->page_zeroing);
1825 }
1826 
1827 /*
1828  * Write a single reply to a request.  First the header is copied from
1829  * the write buffer.  The request is then searched on the processing
1830  * list by the unique ID found in the header.  If found, then remove
1831  * it from the list and copy the rest of the buffer to the request.
1832  * The request is finished by calling request_end()
1833  */
1834 static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
1835 				 struct fuse_copy_state *cs, size_t nbytes)
1836 {
1837 	int err;
1838 	struct fuse_conn *fc = fud->fc;
1839 	struct fuse_pqueue *fpq = &fud->pq;
1840 	struct fuse_req *req;
1841 	struct fuse_out_header oh;
1842 
1843 	if (nbytes < sizeof(struct fuse_out_header))
1844 		return -EINVAL;
1845 
1846 	err = fuse_copy_one(cs, &oh, sizeof(oh));
1847 	if (err)
1848 		goto err_finish;
1849 
1850 	err = -EINVAL;
1851 	if (oh.len != nbytes)
1852 		goto err_finish;
1853 
1854 	/*
1855 	 * Zero oh.unique indicates unsolicited notification message
1856 	 * and error contains notification code.
1857 	 */
1858 	if (!oh.unique) {
1859 		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1860 		return err ? err : nbytes;
1861 	}
1862 
1863 	err = -EINVAL;
1864 	if (oh.error <= -1000 || oh.error > 0)
1865 		goto err_finish;
1866 
1867 	spin_lock(&fpq->lock);
1868 	err = -ENOENT;
1869 	if (!fpq->connected)
1870 		goto err_unlock_pq;
1871 
1872 	req = request_find(fpq, oh.unique);
1873 	if (!req)
1874 		goto err_unlock_pq;
1875 
1876 	/* Is it an interrupt reply? */
1877 	if (req->intr_unique == oh.unique) {
1878 		spin_unlock(&fpq->lock);
1879 
1880 		err = -EINVAL;
1881 		if (nbytes != sizeof(struct fuse_out_header))
1882 			goto err_finish;
1883 
1884 		if (oh.error == -ENOSYS)
1885 			fc->no_interrupt = 1;
1886 		else if (oh.error == -EAGAIN)
1887 			queue_interrupt(&fc->iq, req);
1888 
1889 		fuse_copy_finish(cs);
1890 		return nbytes;
1891 	}
1892 
1893 	clear_bit(FR_SENT, &req->flags);
1894 	list_move(&req->list, &fpq->io);
1895 	req->out.h = oh;
1896 	set_bit(FR_LOCKED, &req->flags);
1897 	spin_unlock(&fpq->lock);
1898 	cs->req = req;
1899 	if (!req->out.page_replace)
1900 		cs->move_pages = 0;
1901 
1902 	err = copy_out_args(cs, &req->out, nbytes);
1903 	fuse_copy_finish(cs);
1904 
1905 	spin_lock(&fpq->lock);
1906 	clear_bit(FR_LOCKED, &req->flags);
1907 	if (!fpq->connected)
1908 		err = -ENOENT;
1909 	else if (err)
1910 		req->out.h.error = -EIO;
1911 	if (!test_bit(FR_PRIVATE, &req->flags))
1912 		list_del_init(&req->list);
1913 	spin_unlock(&fpq->lock);
1914 
1915 	request_end(fc, req);
1916 
1917 	return err ? err : nbytes;
1918 
1919  err_unlock_pq:
1920 	spin_unlock(&fpq->lock);
1921  err_finish:
1922 	fuse_copy_finish(cs);
1923 	return err;
1924 }
1925 
1926 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
1927 {
1928 	struct fuse_copy_state cs;
1929 	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
1930 
1931 	if (!fud)
1932 		return -EPERM;
1933 
1934 	if (!iter_is_iovec(from))
1935 		return -EINVAL;
1936 
1937 	fuse_copy_init(&cs, 0, from);
1938 
1939 	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
1940 }
1941 
1942 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1943 				     struct file *out, loff_t *ppos,
1944 				     size_t len, unsigned int flags)
1945 {
1946 	unsigned nbuf;
1947 	unsigned idx;
1948 	struct pipe_buffer *bufs;
1949 	struct fuse_copy_state cs;
1950 	struct fuse_dev *fud;
1951 	size_t rem;
1952 	ssize_t ret;
1953 
1954 	fud = fuse_get_dev(out);
1955 	if (!fud)
1956 		return -EPERM;
1957 
1958 	pipe_lock(pipe);
1959 
1960 	bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
1961 			      GFP_KERNEL);
1962 	if (!bufs) {
1963 		pipe_unlock(pipe);
1964 		return -ENOMEM;
1965 	}
1966 
1967 	nbuf = 0;
1968 	rem = 0;
1969 	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1970 		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1971 
1972 	ret = -EINVAL;
1973 	if (rem < len) {
1974 		pipe_unlock(pipe);
1975 		goto out;
1976 	}
1977 
1978 	rem = len;
1979 	while (rem) {
1980 		struct pipe_buffer *ibuf;
1981 		struct pipe_buffer *obuf;
1982 
1983 		BUG_ON(nbuf >= pipe->buffers);
1984 		BUG_ON(!pipe->nrbufs);
1985 		ibuf = &pipe->bufs[pipe->curbuf];
1986 		obuf = &bufs[nbuf];
1987 
1988 		if (rem >= ibuf->len) {
1989 			*obuf = *ibuf;
1990 			ibuf->ops = NULL;
1991 			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1992 			pipe->nrbufs--;
1993 		} else {
1994 			pipe_buf_get(pipe, ibuf);
1995 			*obuf = *ibuf;
1996 			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1997 			obuf->len = rem;
1998 			ibuf->offset += obuf->len;
1999 			ibuf->len -= obuf->len;
2000 		}
2001 		nbuf++;
2002 		rem -= obuf->len;
2003 	}
2004 	pipe_unlock(pipe);
2005 
2006 	fuse_copy_init(&cs, 0, NULL);
2007 	cs.pipebufs = bufs;
2008 	cs.nr_segs = nbuf;
2009 	cs.pipe = pipe;
2010 
2011 	if (flags & SPLICE_F_MOVE)
2012 		cs.move_pages = 1;
2013 
2014 	ret = fuse_dev_do_write(fud, &cs, len);
2015 
2016 	for (idx = 0; idx < nbuf; idx++)
2017 		pipe_buf_release(pipe, &bufs[idx]);
2018 
2019 out:
2020 	kvfree(bufs);
2021 	return ret;
2022 }
2023 
2024 static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
2025 {
2026 	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
2027 	struct fuse_iqueue *fiq;
2028 	struct fuse_dev *fud = fuse_get_dev(file);
2029 
2030 	if (!fud)
2031 		return EPOLLERR;
2032 
2033 	fiq = &fud->fc->iq;
2034 	poll_wait(file, &fiq->waitq, wait);
2035 
2036 	spin_lock(&fiq->waitq.lock);
2037 	if (!fiq->connected)
2038 		mask = EPOLLERR;
2039 	else if (request_pending(fiq))
2040 		mask |= EPOLLIN | EPOLLRDNORM;
2041 	spin_unlock(&fiq->waitq.lock);
2042 
2043 	return mask;
2044 }
2045 
2046 /*
2047  * Abort all requests on the given list (pending or processing)
2048  *
2049  * This function releases and reacquires fc->lock
2050  */
2051 static void end_requests(struct fuse_conn *fc, struct list_head *head)
2052 {
2053 	while (!list_empty(head)) {
2054 		struct fuse_req *req;
2055 		req = list_entry(head->next, struct fuse_req, list);
2056 		req->out.h.error = -ECONNABORTED;
2057 		clear_bit(FR_SENT, &req->flags);
2058 		list_del_init(&req->list);
2059 		request_end(fc, req);
2060 	}
2061 }
2062 
2063 static void end_polls(struct fuse_conn *fc)
2064 {
2065 	struct rb_node *p;
2066 
2067 	p = rb_first(&fc->polled_files);
2068 
2069 	while (p) {
2070 		struct fuse_file *ff;
2071 		ff = rb_entry(p, struct fuse_file, polled_node);
2072 		wake_up_interruptible_all(&ff->poll_wait);
2073 
2074 		p = rb_next(p);
2075 	}
2076 }
2077 
2078 /*
2079  * Abort all requests.
2080  *
2081  * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2082  * filesystem.
2083  *
2084  * The same effect is usually achievable through killing the filesystem daemon
2085  * and all users of the filesystem.  The exception is the combination of an
2086  * asynchronous request and the tricky deadlock (see
2087  * Documentation/filesystems/fuse.txt).
2088  *
2089  * Aborting requests under I/O goes as follows: 1: Separate out unlocked
2090  * requests, they should be finished off immediately.  Locked requests will be
2091  * finished after unlock; see unlock_request(). 2: Finish off the unlocked
2092  * requests.  It is possible that some request will finish before we can.  This
2093  * is OK, the request will in that case be removed from the list before we touch
2094  * it.
2095  */
2096 void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
2097 {
2098 	struct fuse_iqueue *fiq = &fc->iq;
2099 
2100 	spin_lock(&fc->lock);
2101 	if (fc->connected) {
2102 		struct fuse_dev *fud;
2103 		struct fuse_req *req, *next;
2104 		LIST_HEAD(to_end);
2105 
2106 		fc->connected = 0;
2107 		fc->blocked = 0;
2108 		fc->aborted = is_abort;
2109 		fuse_set_initialized(fc);
2110 		list_for_each_entry(fud, &fc->devices, entry) {
2111 			struct fuse_pqueue *fpq = &fud->pq;
2112 
2113 			spin_lock(&fpq->lock);
2114 			fpq->connected = 0;
2115 			list_for_each_entry_safe(req, next, &fpq->io, list) {
2116 				req->out.h.error = -ECONNABORTED;
2117 				spin_lock(&req->waitq.lock);
2118 				set_bit(FR_ABORTED, &req->flags);
2119 				if (!test_bit(FR_LOCKED, &req->flags)) {
2120 					set_bit(FR_PRIVATE, &req->flags);
2121 					__fuse_get_request(req);
2122 					list_move(&req->list, &to_end);
2123 				}
2124 				spin_unlock(&req->waitq.lock);
2125 			}
2126 			list_splice_tail_init(&fpq->processing, &to_end);
2127 			spin_unlock(&fpq->lock);
2128 		}
2129 		fc->max_background = UINT_MAX;
2130 		flush_bg_queue(fc);
2131 
2132 		spin_lock(&fiq->waitq.lock);
2133 		fiq->connected = 0;
2134 		list_for_each_entry(req, &fiq->pending, list)
2135 			clear_bit(FR_PENDING, &req->flags);
2136 		list_splice_tail_init(&fiq->pending, &to_end);
2137 		while (forget_pending(fiq))
2138 			kfree(dequeue_forget(fiq, 1, NULL));
2139 		wake_up_all_locked(&fiq->waitq);
2140 		spin_unlock(&fiq->waitq.lock);
2141 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2142 		end_polls(fc);
2143 		wake_up_all(&fc->blocked_waitq);
2144 		spin_unlock(&fc->lock);
2145 
2146 		end_requests(fc, &to_end);
2147 	} else {
2148 		spin_unlock(&fc->lock);
2149 	}
2150 }
2151 EXPORT_SYMBOL_GPL(fuse_abort_conn);
2152 
2153 void fuse_wait_aborted(struct fuse_conn *fc)
2154 {
2155 	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
2156 }
2157 
2158 int fuse_dev_release(struct inode *inode, struct file *file)
2159 {
2160 	struct fuse_dev *fud = fuse_get_dev(file);
2161 
2162 	if (fud) {
2163 		struct fuse_conn *fc = fud->fc;
2164 		struct fuse_pqueue *fpq = &fud->pq;
2165 		LIST_HEAD(to_end);
2166 
2167 		spin_lock(&fpq->lock);
2168 		WARN_ON(!list_empty(&fpq->io));
2169 		list_splice_init(&fpq->processing, &to_end);
2170 		spin_unlock(&fpq->lock);
2171 
2172 		end_requests(fc, &to_end);
2173 
2174 		/* Are we the last open device? */
2175 		if (atomic_dec_and_test(&fc->dev_count)) {
2176 			WARN_ON(fc->iq.fasync != NULL);
2177 			fuse_abort_conn(fc, false);
2178 		}
2179 		fuse_dev_free(fud);
2180 	}
2181 	return 0;
2182 }
2183 EXPORT_SYMBOL_GPL(fuse_dev_release);
2184 
2185 static int fuse_dev_fasync(int fd, struct file *file, int on)
2186 {
2187 	struct fuse_dev *fud = fuse_get_dev(file);
2188 
2189 	if (!fud)
2190 		return -EPERM;
2191 
2192 	/* No locking - fasync_helper does its own locking */
2193 	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
2194 }
2195 
2196 static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
2197 {
2198 	struct fuse_dev *fud;
2199 
2200 	if (new->private_data)
2201 		return -EINVAL;
2202 
2203 	fud = fuse_dev_alloc(fc);
2204 	if (!fud)
2205 		return -ENOMEM;
2206 
2207 	new->private_data = fud;
2208 	atomic_inc(&fc->dev_count);
2209 
2210 	return 0;
2211 }
2212 
2213 static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2214 			   unsigned long arg)
2215 {
2216 	int err = -ENOTTY;
2217 
2218 	if (cmd == FUSE_DEV_IOC_CLONE) {
2219 		int oldfd;
2220 
2221 		err = -EFAULT;
2222 		if (!get_user(oldfd, (__u32 __user *) arg)) {
2223 			struct file *old = fget(oldfd);
2224 
2225 			err = -EINVAL;
2226 			if (old) {
2227 				struct fuse_dev *fud = NULL;
2228 
2229 				/*
2230 				 * Check against file->f_op because CUSE
2231 				 * uses the same ioctl handler.
2232 				 */
2233 				if (old->f_op == file->f_op &&
2234 				    old->f_cred->user_ns == file->f_cred->user_ns)
2235 					fud = fuse_get_dev(old);
2236 
2237 				if (fud) {
2238 					mutex_lock(&fuse_mutex);
2239 					err = fuse_device_clone(fud->fc, file);
2240 					mutex_unlock(&fuse_mutex);
2241 				}
2242 				fput(old);
2243 			}
2244 		}
2245 	}
2246 	return err;
2247 }
2248 
2249 const struct file_operations fuse_dev_operations = {
2250 	.owner		= THIS_MODULE,
2251 	.open		= fuse_dev_open,
2252 	.llseek		= no_llseek,
2253 	.read_iter	= fuse_dev_read,
2254 	.splice_read	= fuse_dev_splice_read,
2255 	.write_iter	= fuse_dev_write,
2256 	.splice_write	= fuse_dev_splice_write,
2257 	.poll		= fuse_dev_poll,
2258 	.release	= fuse_dev_release,
2259 	.fasync		= fuse_dev_fasync,
2260 	.unlocked_ioctl = fuse_dev_ioctl,
2261 	.compat_ioctl   = fuse_dev_ioctl,
2262 };
2263 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2264 
2265 static struct miscdevice fuse_miscdevice = {
2266 	.minor = FUSE_MINOR,
2267 	.name  = "fuse",
2268 	.fops = &fuse_dev_operations,
2269 };
2270 
2271 int __init fuse_dev_init(void)
2272 {
2273 	int err = -ENOMEM;
2274 	fuse_req_cachep = kmem_cache_create("fuse_request",
2275 					    sizeof(struct fuse_req),
2276 					    0, 0, NULL);
2277 	if (!fuse_req_cachep)
2278 		goto out;
2279 
2280 	err = misc_register(&fuse_miscdevice);
2281 	if (err)
2282 		goto out_cache_clean;
2283 
2284 	return 0;
2285 
2286  out_cache_clean:
2287 	kmem_cache_destroy(fuse_req_cachep);
2288  out:
2289 	return err;
2290 }
2291 
2292 void fuse_dev_cleanup(void)
2293 {
2294 	misc_deregister(&fuse_miscdevice);
2295 	kmem_cache_destroy(fuse_req_cachep);
2296 }
2297