/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

/* Ordinary requests have even IDs, while interrupt IDs are odd */
#define FUSE_INT_REQ_BIT (1ULL << 0)
#define FUSE_REQ_ID_STEP (1ULL << 1)

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return READ_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	__set_bit(FR_PENDING, &req->flags);
	req->fm = fm;
}

static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
	if (req)
		fuse_request_init(fm, req);

	return req;
}

static void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

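/*
 * Request allocation must wait until the INIT reply has arrived
 * (fc->initialized).  Background requests are additionally held back
 * while fc->blocked is set, i.e. while max_background is exceeded.
 */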
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static void fuse_drop_waiting(struct fuse_conn *fc)
{
	/*
	 * Lockless check of fc->connected is okay, because atomic_dec_and_test()
	 * provides a memory barrier matched with the one in fuse_wait_aborted()
	 * to ensure no wake-up is missed.
	 */
	if (atomic_dec_and_test(&fc->num_waiting) &&
	    !READ_ONCE(fc->connected)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}

static void fuse_put_request(struct fuse_req *req);

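/*
 * Allocate and initialize a request carrying the caller's credentials.
 * The caller is accounted in fc->num_waiting for the lifetime of the
 * request; the allocation sleeps (killably) while fuse_block_alloc()
 * says new requests must wait.
 */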
static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(fm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
		     req->in.h.gid == ((gid_t)-1))) {
		fuse_put_request(req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}

static void fuse_put_request(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->bg_lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->bg_lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		fuse_request_free(req);
	}
}

unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
EXPORT_SYMBOL_GPL(fuse_len_args);

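/*
 * Allocate the next unique request ID.  IDs advance by FUSE_REQ_ID_STEP
 * so that bit 0 stays clear for ordinary requests; the matching
 * interrupt request reuses the same ID with FUSE_INT_REQ_BIT set.
 * The plain increment relies on the caller holding fiq->lock.
 */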
u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	fiq->reqctr += FUSE_REQ_ID_STEP;
	return fiq->reqctr;
}
EXPORT_SYMBOL_GPL(fuse_get_unique);

static unsigned int fuse_req_hash(u64 unique)
{
	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}

/*
 * A new request is available, wake fiq->waitq
 */
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	wake_up(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	spin_unlock(&fiq->lock);
}

const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);

static void queue_request_and_unlock(struct fuse_iqueue *fiq,
				     struct fuse_req *req)
__releases(fiq->lock)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		fuse_len_args(req->args->in_numargs,
			      (struct fuse_arg *) req->args->in_args);
	list_add_tail(&req->list, &fiq->pending);
	fiq->ops->wake_pending_and_unlock(fiq);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		fiq->ops->wake_forget_and_unlock(fiq);
	} else {
		kfree(forget);
		spin_unlock(&fiq->lock);
	}
}

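/*
 * Move queued background requests to the input queue as long as fewer
 * than max_background of them are active.  Called with fc->bg_lock held.
 */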
static void flush_bg_queue(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request_and_unlock(fiq, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
void fuse_request_end(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	/*
	 * test_and_set_bit() implies smp_mb() between bit
	 * changing and below FR_INTERRUPTED check.  Pairs with
	 * smp_mb() from queue_interrupt().
	 */
	if (test_bit(FR_INTERRUPTED, &req->flags)) {
		spin_lock(&fiq->lock);
		list_del_init(&req->intr_entry);
		spin_unlock(&fiq->lock);
	}
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->bg_lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any.  It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);
	} else {
		/* Wake up waiter sleeping in request_wait_answer() */
		wake_up(&req->waitq);
	}

	if (test_bit(FR_ASYNC, &req->flags))
		req->args->end(fm, req->args, req->out.h.error);
put_request:
	fuse_put_request(req);
}
EXPORT_SYMBOL_GPL(fuse_request_end);

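/*
 * Queue an INTERRUPT request for req on fiq->interrupts and wake up the
 * reader, unless the original request has already finished.  Returns
 * -EINVAL if FR_INTERRUPTED was never set on the request.
 */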
static int queue_interrupt(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	spin_lock(&fiq->lock);
	/* Check that the request has really been marked interrupted */
	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		/*
		 * Pairs with smp_mb() implied by test_and_set_bit()
		 * from fuse_request_end().
		 */
		smp_mb();
		if (test_bit(FR_FINISHED, &req->flags)) {
			list_del_init(&req->intr_entry);
			spin_unlock(&fiq->lock);
			return 0;
		}
		fiq->ops->wake_interrupt_and_unlock(fiq);
	} else {
		spin_unlock(&fiq->lock);
	}
	return 0;
}

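/*
 * Wait for the request to be answered, in up to three phases.  First
 * wait interruptibly: any signal turns into a FUSE_INTERRUPT sent to
 * userspace.  Then wait killably: a fatal signal dequeues the request
 * if it hasn't reached userspace yet.  Finally wait uninterruptibly
 * for requests that are already in userspace or were forced.
 */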
static void request_wait_answer(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
				test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
				test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		/* acquire extra reference, since request is still needed
		   after fuse_request_end() */
		__fuse_get_request(req);
		queue_request_and_unlock(fiq, req);

		request_wait_answer(req);
		/* Pairs with smp_wmb() in fuse_request_end() */
		smp_rmb();
	}
}

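/*
 * Shrink argument sizes for connections negotiated with old protocol
 * minor versions, so the request and reply layouts match what old
 * userspace filesystems expect.
 */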
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->opcode) {
		case FUSE_CREATE:
			args->in_args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

static void fuse_force_creds(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
{
	req->in.h.opcode = args->opcode;
	req->in.h.nodeid = args->nodeid;
	req->args = args;
	if (args->is_ext)
		req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
	if (args->end)
		__set_bit(FR_ASYNC, &req->flags);
}

ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	ssize_t ret;

	if (args->force) {
		atomic_inc(&fc->num_waiting);
		req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);

		if (!args->nocreds)
			fuse_force_creds(req);

		__set_bit(FR_WAITING, &req->flags);
		__set_bit(FR_FORCE, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, false);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);
	fuse_args_to_req(req, args);

	if (!args->noreply)
		__set_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(req);
	ret = req->out.h.error;
	if (!ret && args->out_argvar) {
		BUG_ON(args->out_numargs == 0);
		ret = args->out_args[args->out_numargs - 1].size;
	}
	fuse_put_request(req);

	return ret;
}

static bool fuse_request_queue_background(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	bool queued = false;

	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	spin_lock(&fc->bg_lock);
	if (likely(fc->connected)) {
		fc->num_background++;
		if (fc->num_background == fc->max_background)
			fc->blocked = 1;
		list_add_tail(&req->list, &fc->bg_queue);
		flush_bg_queue(fc);
		queued = true;
	}
	spin_unlock(&fc->bg_lock);

	return queued;
}

int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
			   gfp_t gfp_flags)
{
	struct fuse_req *req;

	if (args->force) {
		WARN_ON(!args->nocreds);
		req = fuse_request_alloc(fm, gfp_flags);
		if (!req)
			return -ENOMEM;
		__set_bit(FR_BACKGROUND, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, true);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	fuse_args_to_req(req, args);

	if (!fuse_request_queue_background(req)) {
		fuse_put_request(req);
		return -ENOTCONN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_simple_background);

static int fuse_simple_notify_reply(struct fuse_mount *fm,
				    struct fuse_args *args, u64 unique)
{
	struct fuse_req *req;
	struct fuse_iqueue *fiq = &fm->fc->iq;
	int err = 0;

	req = fuse_get_req(fm, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;

	fuse_args_to_req(req, args);

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		queue_request_and_unlock(fiq, req);
	} else {
		err = -ENODEV;
		spin_unlock(&fiq->lock);
		fuse_put_request(req);
	}

	return err;
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs >= cs->pipe->max_usage)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_local_page(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_local(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_folio(struct folio *folio)
{
	if (folio_mapped(folio) ||
	    folio->mapping != NULL ||
	    (folio->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_workingset |
	       1 << PG_reclaim |
	       1 << PG_waiters |
	       LRU_GEN_MASK | LRU_REFS_MASK))) {
		dump_page(&folio->page, "fuse: trying to steal weird page");
		return 1;
	}
	return 0;
}

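/*
 * Try to steal the page behind a pipe buffer and splice it into the
 * page cache in place of *pagep (the SPLICE_F_MOVE fast path).  On
 * success returns 0 with *pagep pointing at the stolen page; returns 1
 * to make the caller fall back to copying, or a negative error.
 */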
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct folio *oldfolio = page_folio(*pagep);
	struct folio *newfolio;
	struct pipe_buffer *buf = cs->pipebufs;

	folio_get(oldfolio);
	err = unlock_request(cs->req);
	if (err)
		goto out_put_old;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		goto out_put_old;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (!pipe_buf_try_steal(cs->pipe, buf))
		goto out_fallback;

	newfolio = page_folio(buf->page);

	if (!folio_test_uptodate(newfolio))
		folio_mark_uptodate(newfolio);

	folio_clear_mappedtodisk(newfolio);

	if (fuse_check_folio(newfolio) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(folio_mapped(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_has_private(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_test_dirty(oldfolio) ||
		    folio_test_writeback(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_test_mlocked(oldfolio)))
		goto out_fallback_unlock;

	replace_page_cache_folio(oldfolio, newfolio);

	folio_get(newfolio);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		folio_add_lru(newfolio);

	/*
	 * Release while we have extra ref on stolen page.  Otherwise
	 * anon_pipe_buf_release() might think the page can be reused.
	 */
	pipe_buf_release(cs->pipe, buf);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = &newfolio->page;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		folio_unlock(newfolio);
		folio_put(newfolio);
		goto out_put_old;
	}

	folio_unlock(oldfolio);
	/* Drop ref for ap->pages[] array */
	folio_put(oldfolio);
	cs->len = 0;

	err = 0;
out_put_old:
	/* Drop ref obtained in this function */
	folio_put(oldfolio);
	return err;

out_fallback_unlock:
	folio_unlock(newfolio);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (!err)
		err = 1;

	goto out_put_old;
}

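/*
 * Instead of copying, reference a page of request data directly from a
 * new pipe buffer (zero-copy path of the splice read).
 */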
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs >= cs->pipe->max_usage)
		return -EIO;

	get_page(page);
	err = unlock_request(cs->req);
	if (err) {
		put_page(page);
		return err;
	}

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			/*
			 * Can't control lifetime of pipe buffers, so always
			 * copy user pages.
			 */
			if (cs->req->args->user_pages) {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			} else {
				return fuse_ref_page(cs, page, offset, count);
			}
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_local_page(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_local(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);

	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned int offset = ap->descs[i].offset;
		unsigned int count = min(nbytes, ap->descs[i].length);

		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
					     unsigned int max,
					     unsigned int *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
EXPORT_SYMBOL(fuse_dequeue_forget);

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->lock)
{
	int err;
	struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = fuse_dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * fuse_request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_args *args;
	unsigned reqsize;
	unsigned int hash;

	/*
	 * Require sane minimum read buffer - that has capacity for fixed part
	 * of any request header + negotiated max_write room for data.
	 *
	 * Historically libfuse reserves 4K for fixed header room, but e.g.
	 * GlusterFS reserves only 80 bytes
	 *
	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
	 *
	 * which is the absolute minimum any sane filesystem should be using
	 * for header room.
	 */
	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
			   sizeof(struct fuse_in_header) +
			   sizeof(struct fuse_write_in) +
			   fc->max_write))
		return -EINVAL;

restart:
	for (;;) {
		spin_lock(&fiq->lock);
		if (!fiq->connected || request_pending(fiq))
			break;
		spin_unlock(&fiq->lock);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = wait_event_interruptible_exclusive(fiq->waitq,
				!fiq->connected || request_pending(fiq));
		if (err)
			return err;
	}

	if (!fiq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto err_unlock;
	}

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->lock);

	args = req->args;
	reqsize = req->in.h.len;

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (args->opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		fuse_request_end(req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	/*
	 * Must not put request on fpq->io queue after having been shut down by
	 * fuse_abort_conn()
	 */
	if (!fpq->connected) {
		req->out.h.error = err = -ECONNABORTED;
		goto out_end;
	}
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
	if (!err)
		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
				     (struct fuse_arg *) args->in_args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	hash = fuse_req_hash(req->in.h.unique);
	list_move_tail(&req->list, &fpq->processing[hash]);
	__fuse_get_request(req);
	set_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(req);
	fuse_put_request(req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	fuse_request_end(req);
	return err;

err_unlock:
	spin_unlock(&fiq->lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!user_backed_iter(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kvfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = fuse_reverse_inval_inode(fc, outarg.ino,
				       outarg.off, outarg.len);
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

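/*
 * Handle FUSE_NOTIFY_STORE: copy data pushed by the userspace
 * filesystem directly into the page cache of the target inode,
 * growing the cached file size if the store reaches past EOF.
 */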
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_attr(inode, file_size, outarg.size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!PageUptodate(page) && !err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end)) {
			zero_user_segment(page, this_num, PAGE_SIZE);
			SetPageUptodate(page);
		}
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

struct fuse_retrieve_args {
	struct fuse_args_pages ap;
	struct fuse_notify_retrieve_in inarg;
};

static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
			      int error)
{
	struct fuse_retrieve_args *ra =
		container_of(args, typeof(*ra), ap.args);

	release_pages(ra->ap.pages, ra->ap.num_pages);
	kfree(ra);
}

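/*
 * Do the bulk of FUSE_NOTIFY_RETRIEVE: collect up to max_write bytes
 * (capped at max_pages pages) from the inode's page cache and send
 * them back to userspace as a FUSE_NOTIFY_REPLY request.
 */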
static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	unsigned int num_pages;
	struct fuse_conn *fc = fm->fc;
	struct fuse_retrieve_args *ra;
	size_t args_size = sizeof(*ra);
	struct fuse_args_pages *ap;
	struct fuse_args *args;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = min(outarg->size, fc->max_write);
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, fc->max_pages);

	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));

	ra = kzalloc(args_size, GFP_KERNEL);
	if (!ra)
		return -ENOMEM;

	ap = &ra->ap;
	ap->pages = (void *) (ra + 1);
	ap->descs = (void *) (ap->pages + num_pages);

	args = &ap->args;
	args->nodeid = outarg->nodeid;
	args->opcode = FUSE_NOTIFY_REPLY;
	args->in_numargs = 2;
	args->in_pages = true;
	args->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && ap->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].offset = offset;
		ap->descs[ap->num_pages].length = this_num;
		ap->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	ra->inarg.offset = outarg->offset;
	ra->inarg.size = total_len;
	args->in_args[0].size = sizeof(ra->inarg);
	args->in_args[0].value = &ra->inarg;
	args->in_args[1].size = total_len;

	err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fm, args, err);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct fuse_mount *fm;
	struct inode *inode;
	u64 nodeid;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	nodeid = outarg.nodeid;

	inode = fuse_ilookup(fc, nodeid, &fm);
	if (inode) {
		err = fuse_retrieve(fm, inode, &outarg);
		iput(inode);
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	unsigned int hash = fuse_req_hash(unique);
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing[hash], list) {
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

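/*
 * Copy the reply arguments from the userspace buffer.  The last
 * argument may be shorter than requested if out_argvar is set; its
 * size is trimmed to what userspace actually wrote.
 */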
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	reqsize += fuse_len_args(args->out_numargs, args->out_args);

	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
		unsigned diffsize = reqsize - nbytes;

		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
			      args->out_args, args->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling fuse_request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	err = -EINVAL;
	if (nbytes < sizeof(struct fuse_out_header))
		goto out;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto copy_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto copy_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		goto out;
	}

	err = -EINVAL;
	if (oh.error <= -512 || oh.error > 0)
		goto copy_finish;

	spin_lock(&fpq->lock);
	req = NULL;
	if (fpq->connected)
		req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);

	err = -ENOENT;
	if (!req) {
		spin_unlock(&fpq->lock);
		goto copy_finish;
	}

	/* Is it an interrupt reply ID? */
	if (oh.unique & FUSE_INT_REQ_BIT) {
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = 0;
		if (nbytes != sizeof(struct fuse_out_header))
			err = -EINVAL;
		else if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			err = queue_interrupt(req);

		fuse_put_request(req);

		goto copy_finish;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->args->page_replace)
		cs->move_pages = 0;

	if (oh.error)
		err = nbytes != sizeof(oh) ? -EINVAL : 0;
	else
		err = copy_out_args(cs, req->args, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
out:
	return err ? err : nbytes;

copy_finish:
	fuse_copy_finish(cs);
	goto out;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!user_backed_iter(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

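/*
 * Splice a reply from a pipe into the device.  The needed pipe buffers
 * are collected (taking over fully consumed ones from the pipe) and
 * handed to fuse_dev_do_write(); SPLICE_F_MOVE additionally allows
 * moving whole pages into the page cache instead of copying.
 */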
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned int head, tail, mask, count;
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	pipe_lock(pipe);

	head = pipe->head;
	tail = pipe->tail;
	mask = pipe->ring_size - 1;
	count = head - tail;

	bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs) {
		pipe_unlock(pipe);
		return -ENOMEM;
	}

	nbuf = 0;
	rem = 0;
	for (idx = tail; idx != head && rem < len; idx++)
		rem += pipe->bufs[idx & mask].len;

	ret = -EINVAL;
	if (rem < len)
		goto out_free;

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		if (WARN_ON(nbuf >= count || tail == head))
			goto out_free;

		ibuf = &pipe->bufs[tail & mask];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			tail++;
			pipe->tail = tail;
		} else {
			if (!pipe_buf_get(pipe, ibuf))
				goto out_free;

			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	pipe_lock(pipe);
out_free:
	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];

		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	pipe_unlock(pipe);

	kvfree(bufs);
	return ret;
}

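/*
 * The device is always writable (a reply may be sent at any time); it
 * becomes readable when a connected input queue has pending requests.
 */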
static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return EPOLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->lock);
	if (!fiq->connected)
		mask = EPOLLERR;
	else if (request_pending(fiq))
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock(&fiq->lock);

	return mask;
}

/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		fuse_request_end(req);
	}
}

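/*
 * Wake up everybody waiting in poll() on a fuse file, so that pollers
 * notice the connection going away instead of sleeping forever.
 */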
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable by killing the filesystem daemon and
 * all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request().  2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);
		unsigned int i;

		/* Background queuing checks fc->connected under bg_lock */
		spin_lock(&fc->bg_lock);
		fc->connected = 0;
		spin_unlock(&fc->bg_lock);

		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
				list_splice_tail_init(&fpq->processing[i],
						      &to_end);
			spin_unlock(&fpq->lock);
		}
		spin_lock(&fc->bg_lock);
		fc->blocked = 0;
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);

		spin_lock(&fiq->lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		while (forget_pending(fiq))
			kfree(fuse_dequeue_forget(fiq, 1, NULL));
		wake_up_all(&fiq->waitq);
		spin_unlock(&fiq->lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		end_requests(&to_end);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

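/*
 * Wait until all outstanding requests (including those being finished
 * off after an abort) have dropped their reference on the connection.
 */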
void fuse_wait_aborted(struct fuse_conn *fc)
{
	/* matches implicit memory barrier in fuse_drop_waiting() */
	smp_mb();
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}

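/*
 * Release a device file.  Requests still on this device's processing
 * queues are ended with -ECONNABORTED; closing the last device aborts
 * the whole connection.
 */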
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);
		unsigned int i;

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_init(&fpq->processing[i], &to_end);
		spin_unlock(&fpq->lock);

		end_requests(&to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

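/* Enable or disable SIGIO delivery for request arrival on this queue */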
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

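/*
 * Install an additional device on an existing connection.  Each clone
 * gets its own processing queue, so multiple server threads can handle
 * requests without contending on a single queue lock.
 */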
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc_install(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

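/*
 * Illustrative userspace use of FUSE_DEV_IOC_CLONE (a sketch; names are
 * placeholders): each worker opens its own /dev/fuse and attaches it to
 * the existing session fd, gaining a private processing queue:
 *
 *	int clonefd = open("/dev/fuse", O_RDWR | O_CLOEXEC);
 *	uint32_t oldfd = session_fd;	// fd from the original mount
 *	ioctl(clonefd, FUSE_DEV_IOC_CLONE, &oldfd);
 */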
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int res;
	int oldfd;
	struct fuse_dev *fud = NULL;
	struct fd f;

	switch (cmd) {
	case FUSE_DEV_IOC_CLONE:
		if (get_user(oldfd, (__u32 __user *)arg))
			return -EFAULT;

		f = fdget(oldfd);
		if (!f.file)
			return -EINVAL;

		/*
		 * Check against file->f_op because CUSE
		 * uses the same ioctl handler.
		 */
		if (f.file->f_op == file->f_op)
			fud = fuse_get_dev(f.file);

		res = -EINVAL;
		if (fud) {
			mutex_lock(&fuse_mutex);
			res = fuse_device_clone(fud->fc, file);
			mutex_unlock(&fuse_mutex);
		}
		fdput(f);
		break;
	default:
		res = -ENOTTY;
		break;
	}
	return res;
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl	= fuse_dev_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

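/*
 * Module init for the device: create the request slab cache and register
 * the "fuse" misc device; undo the cache on registration failure.
 */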
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}