// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* Used to help calculate the FUSE connection's max_pages limit for a request's
 * size. Parts of the struct fuse_req are sliced into scattergather lists in
 * addition to the pages used, so this can help account for that overhead.
 */
#define FUSE_HEADER_OVERHEAD	4

/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion in device removal and mounting path
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

enum {
	VQ_HIPRIO,
	VQ_REQUEST
};

#define VQ_NAME_LEN	24

/* Per-virtqueue state */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq;		/* protected by ->lock */
	struct work_struct done_work;
	struct list_head queued_reqs;
	struct list_head end_reqs;	/* End these requests */
	struct delayed_work dispatch_work;
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* No inflight requests */
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
	struct kref refcount;
	struct list_head list;		/* on virtio_fs_instances */
	char *tag;
	struct virtio_fs_vq *vqs;
	unsigned int nvqs;		 /* number of virtqueues */
	unsigned int num_request_queues; /* number of request queues */
	struct dax_device *dax_dev;

	/* DAX memory window where file contents are mapped */
	void *window_kaddr;
	phys_addr_t window_phys_addr;
	size_t window_len;
};

struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	/* This request can be temporarily queued on virt queue */
	struct list_head list;
	struct virtio_fs_forget_req req;
};

struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight);

static const struct constant_table dax_param_enums[] = {
	{"always",	FUSE_DAX_ALWAYS },
	{"never",	FUSE_DAX_NEVER },
	{"inode",	FUSE_DAX_INODE_USER },
	{}
};

enum {
	OPT_DAX,
	OPT_DAX_ENUM,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
	fsparam_flag("dax", OPT_DAX),
	fsparam_enum("dax", OPT_DAX_ENUM, dax_param_enums),
	{}
};

static int virtio_fs_parse_param(struct fs_context *fsc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;

	opt = fs_parse(fsc, virtio_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_DAX:
		ctx->dax_mode = FUSE_DAX_ALWAYS;
		break;
	case OPT_DAX_ENUM:
		ctx->dax_mode = result.uint_32;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void virtio_fs_free_fsc(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;

	kfree(ctx);
}

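/* Map a virtqueue back to its per-queue driver state */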
static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
	struct virtio_fs *fs = vq->vdev->priv;

	return &fs->vqs[vq->index];
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
	fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight <= 0);
	fsvq->in_flight--;
	if (!fsvq->in_flight)
		complete(&fsvq->in_flight_zero);
}

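/* kref release callback: free the virtio_fs instance and its virtqueue array */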
static void release_virtio_fs_obj(struct kref *ref)
{
	struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);

	kfree(vfs->vqs);
	kfree(vfs);
}

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put(struct virtio_fs *fs)
{
	kref_put(&fs->refcount, release_virtio_fs_obj);
}

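/* Drop the fuse_iqueue's reference on the virtio_fs instance */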
static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
	struct virtio_fs *vfs = fiq->priv;

	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(vfs);
	mutex_unlock(&virtio_fs_mutex);
}

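/* Wait for all in-flight requests on this queue to complete and flush its work items */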
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in flight requests to finish. */
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		/* We are holding virtio_fs_mutex. There should not be any
		 * waiters waiting for completion.
		 */
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}

	flush_work(&fsvq->done_work);
	flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		virtio_fs_drain_queue(fsvq);
	}
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between ->remove and ->kill_sb
	 * paths. We don't want both of these draining queue at the
	 * same time. Current completion logic reinits completion
	 * and that means there should not be any other thread
	 * doing reinit or waiting for completion already.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

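/* Mark all queues as connected so requests can be queued again */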
static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = true;
		spin_unlock(&fsvq->lock);
	}
}

/* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_fs *fs)
{
	struct virtio_fs *fs2;
	bool duplicate = false;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs2, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, fs2->tag) == 0)
			duplicate = true;
	}

	if (!duplicate)
		list_add_tail(&fs->list, &virtio_fs_instances);

	mutex_unlock(&virtio_fs_mutex);

	if (duplicate)
		return -EEXIST;
	return 0;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
	struct virtio_fs *fs;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, tag) == 0) {
			kref_get(&fs->refcount);
			goto found;
		}
	}

	fs = NULL; /* not found */

found:
	mutex_unlock(&virtio_fs_mutex);

	return fs;
}

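/* Free the fuse_dev attached to each virtqueue */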
static void virtio_fs_free_devs(struct virtio_fs *fs)
{
	unsigned int i;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		if (!fsvq->fud)
			continue;

		fuse_dev_free(fsvq->fud);
		fsvq->fud = NULL;
	}
}

/* Read filesystem name from virtio config into fs->tag (devm-allocated,
 * freed automatically when the device goes away).
 */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
	char *end;
	size_t len;

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));
	end = memchr(tag_buf, '\0', sizeof(tag_buf));
	if (end == tag_buf)
		return -EINVAL; /* empty tag */
	if (!end)
		end = &tag_buf[sizeof(tag_buf)];

	len = end - tag_buf;
	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
	if (!fs->tag)
		return -ENOMEM;
	memcpy(fs->tag, tag_buf, len);
	fs->tag[len] = '\0';

	/* While the VIRTIO specification allows any character, newlines are
	 * awkward on mount(8) command-lines and cause problems in the sysfs
	 * "tag" attr and uevent TAG= properties. Forbid them.
	 */
	if (strchr(fs->tag, '\n')) {
		dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n");
		return -EINVAL;
	}

	return 0;
}

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct virtqueue *vq = fsvq->vq;

	/* Free completed FUSE_FORGET requests */
	spin_lock(&fsvq->lock);
	do {
		unsigned int len;
		void *req;

		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			kfree(req);
			dec_in_flight_req(fsvq);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);
}

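/* Work function: end requests that could not be sent, then retry queued requests */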
static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	int ret;

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
					       list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			break;
		}

		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);
		fuse_request_end(req);
	}

	/* Dispatch pending requests */
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->queued_reqs,
					       struct fuse_req, list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);

		ret = virtio_fs_enqueue_req(fsvq, req, true);
		if (ret < 0) {
			if (ret == -ENOMEM || ret == -ENOSPC) {
				spin_lock(&fsvq->lock);
				list_add_tail(&req->list, &fsvq->queued_reqs);
				schedule_delayed_work(&fsvq->dispatch_work,
						      msecs_to_jiffies(1));
				spin_unlock(&fsvq->lock);
				return;
			}
			req->out.h.error = ret;
			spin_lock(&fsvq->lock);
			dec_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
			       ret);
			fuse_request_end(req);
		}
	}
}

/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{
	struct scatterlist sg;
	struct virtqueue *vq;
	int ret = 0;
	bool notify;
	struct virtio_fs_forget_req *req = &forget->req;

	spin_lock(&fsvq->lock);
	if (!fsvq->connected) {
		if (in_flight)
			dec_in_flight_req(fsvq);
		kfree(forget);
		goto out;
	}

	sg_init_one(&sg, req, sizeof(*req));
	vq = fsvq->vq;
	dev_dbg(&vq->vdev->dev, "%s\n", __func__);

	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
				 ret);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			if (!in_flight)
				inc_in_flight_req(fsvq);
			/* Queue is full */
			ret = 1;
		} else {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
				 ret);
			kfree(forget);
			if (in_flight)
				dec_in_flight_req(fsvq);
		}
		goto out;
	}

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);
	return ret;
out:
	spin_unlock(&fsvq->lock);
	return ret;
}

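/* Work function: retry FORGET requests that could not be queued earlier */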
static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
					struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}

		list_del(&forget->list);
		spin_unlock(&fsvq->lock);
		if (send_forget_request(fsvq, forget, true))
			return;
	}
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	unsigned int offset = 0;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int len;
	unsigned int i;

	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
	      fuse_len_args(num_out, args->out_args);

	req->argbuf = kmalloc(len, GFP_ATOMIC);
	if (!req->argbuf)
		return -ENOMEM;

	for (i = 0; i < num_in; i++) {
		memcpy(req->argbuf + offset,
		       args->in_args[i].value,
		       args->in_args[i].size);
		offset += args->in_args[i].size;
	}

	return 0;
}

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
	unsigned int remaining;
	unsigned int offset;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int i;

	remaining = req->out.h.len - sizeof(req->out.h);
	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

	for (i = 0; i < num_out; i++) {
		unsigned int argsize = args->out_args[i].size;

		if (args->out_argvar &&
		    i == args->out_numargs - 1 &&
		    argsize > remaining) {
			argsize = remaining;
		}

		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
		offset += argsize;

		if (i != args->out_numargs - 1)
			remaining -= argsize;
	}

	/* Store the actual size of the variable-length arg */
	if (args->out_argvar)
		args->out_args[args->out_numargs - 1].size = remaining;

	kfree(req->argbuf);
	req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
				       struct virtio_fs_vq *fsvq)
{
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct fuse_args *args;
	struct fuse_args_pages *ap;
	unsigned int len, i, thislen;
	struct page *page;

	/*
	 * TODO verify that server properly follows FUSE protocol
	 * (oh.uniq, oh.len)
	 */
	args = req->args;
	copy_args_from_argbuf(args, req);

	if (args->out_pages && args->page_zeroing) {
		len = args->out_args[args->out_numargs - 1].size;
		ap = container_of(args, typeof(*ap), args);
		for (i = 0; i < ap->num_pages; i++) {
			thislen = ap->descs[i].length;
			if (len < thislen) {
				WARN_ON(ap->descs[i].offset);
				page = ap->pages[i];
				zero_user_segment(page, len, thislen);
				len = 0;
			} else {
				len -= thislen;
			}
		}
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
	spin_lock(&fsvq->lock);
	dec_in_flight_req(fsvq);
	spin_unlock(&fsvq->lock);
}

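/* Complete a request from worker context (used for requests that may block) */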
static void virtio_fs_complete_req_work(struct work_struct *work)
{
	struct virtio_fs_req_work *w =
		container_of(work, typeof(*w), done_work);

	virtio_fs_request_complete(w->req, w->fsvq);
	kfree(w);
}

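/* Work function: collect completed requests off the virtqueue and end them */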
static void virtio_fs_requests_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct virtqueue *vq = fsvq->vq;
	struct fuse_req *req;
	struct fuse_req *next;
	unsigned int len;
	LIST_HEAD(reqs);

	/* Collect completed requests off the virtqueue */
	spin_lock(&fsvq->lock);
	do {
		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_lock(&fpq->lock);
			list_move_tail(&req->list, &reqs);
			spin_unlock(&fpq->lock);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);

	/* End requests */
	list_for_each_entry_safe(req, next, &reqs, list) {
		list_del_init(&req->list);

		/* blocking async request completes in a worker context */
		if (req->args->may_block) {
			struct virtio_fs_req_work *w;

			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
			w->fsvq = fsvq;
			w->req = req;
			schedule_work(&w->done_work);
		} else {
			virtio_fs_request_complete(req, fsvq);
		}
	}
}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

	schedule_work(&fsvq->done_work);
}

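/* Initialize per-virtqueue state and pick the work functions matching the queue type */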
static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
			      int vq_type)
{
	strscpy(fsvq->name, name, VQ_NAME_LEN);
	spin_lock_init(&fsvq->lock);
	INIT_LIST_HEAD(&fsvq->queued_reqs);
	INIT_LIST_HEAD(&fsvq->end_reqs);
	init_completion(&fsvq->in_flight_zero);

	if (vq_type == VQ_REQUEST) {
		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_request_dispatch_work);
	} else {
		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_hiprio_dispatch_work);
	}
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
			       struct virtio_fs *fs)
{
	struct virtqueue **vqs;
	vq_callback_t **callbacks;
	const char **names;
	unsigned int i;
	int ret = 0;

	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
			&fs->num_request_queues);
	if (fs->num_request_queues == 0)
		return -EINVAL;

	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
	if (!fs->vqs)
		return -ENOMEM;

	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
	callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
				  GFP_KERNEL);
	names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
	if (!vqs || !callbacks || !names) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the hiprio/forget request virtqueue */
	callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
	names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;

	/* Initialize the requests virtqueues */
	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
		char vq_name[VQ_NAME_LEN];

		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
		callbacks[i] = virtio_fs_vq_done;
		names[i] = fs->vqs[i].name;
	}

	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
	if (ret < 0)
		goto out;

	for (i = 0; i < fs->nvqs; i++)
		fs->vqs[i].vq = vqs[i];

	virtio_fs_start_all_queues(fs);
out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (ret)
		kfree(fs->vqs);
	return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
{
	vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number. The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, enum dax_access_mode mode,
				    void **kaddr, pfn_t *pfn)
{
	struct virtio_fs *fs = dax_get_private(dax_dev);
	phys_addr_t offset = PFN_PHYS(pgoff);
	size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

	if (kaddr)
		*kaddr = fs->window_kaddr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
				     PFN_DEV | PFN_MAP);
	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

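/* Zero a page range in the DAX window and flush it */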
static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
			       NULL);
	if (rc < 0)
		return dax_mem2blk_err(rc);

	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
	.direct_access = virtio_fs_direct_access,
	.zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
	struct dax_device *dax_dev = data;

	kill_dax(dax_dev);
	put_dax(dax_dev);
}

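/* Discover the DAX cache shared memory region, map it and register a dax_device for it */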
static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
	struct virtio_shm_region cache_reg;
	struct dev_pagemap *pgmap;
	bool have_cache;

	if (!IS_ENABLED(CONFIG_FUSE_DAX))
		return 0;

	/* Get cache region */
	have_cache = virtio_get_shm_region(vdev, &cache_reg,
					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
	if (!have_cache) {
		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
		return 0;
	}

	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
				     dev_name(&vdev->dev))) {
		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
			 cache_reg.addr, cache_reg.len);
		return -EBUSY;
	}

	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
		   cache_reg.addr);

	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* Ideally we would directly use the PCI BAR resource but
	 * devm_memremap_pages() wants its own copy in pgmap. So
	 * initialize a struct resource from scratch (only the start
	 * and end fields will be used).
	 */
	pgmap->range = (struct range) {
		.start = (phys_addr_t) cache_reg.addr,
		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
	};
	pgmap->nr_range = 1;

	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
	if (IS_ERR(fs->window_kaddr))
		return PTR_ERR(fs->window_kaddr);

	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
	fs->window_len = (phys_addr_t) cache_reg.len;

	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

	fs->dax_dev = alloc_dax(fs, &virtio_fs_dax_ops);
	if (IS_ERR(fs->dax_dev))
		return PTR_ERR(fs->dax_dev);

	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
					fs->dax_dev);
}

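/* Device probe: read the tag, set up virtqueues and the optional DAX window,
 * then publish the instance on virtio_fs_instances.
 */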
static int virtio_fs_probe(struct virtio_device *vdev)
{
	struct virtio_fs *fs;
	int ret;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return -ENOMEM;
	kref_init(&fs->refcount);
	vdev->priv = fs;

	ret = virtio_fs_read_tag(vdev, fs);
	if (ret < 0)
		goto out;

	ret = virtio_fs_setup_vqs(vdev, fs);
	if (ret < 0)
		goto out;

	/* TODO vq affinity */

	ret = virtio_fs_setup_dax(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	/* Bring the device online in case the filesystem is mounted and
	 * requests need to be sent before we return.
	 */
	virtio_device_ready(vdev);

	ret = virtio_fs_add_instance(fs);
	if (ret < 0)
		goto out_vqs;

	return 0;

out_vqs:
	virtio_reset_device(vdev);
	virtio_fs_cleanup_vqs(vdev);
	kfree(fs->vqs);

out:
	vdev->priv = NULL;
	kfree(fs);
	return ret;
}

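/* Mark all queues disconnected so no new requests are submitted */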
static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = false;
		spin_unlock(&fsvq->lock);
	}
}

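/* Device removal: unpublish the instance, drain all queues and tear down the virtqueues */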
static void virtio_fs_remove(struct virtio_device *vdev)
{
	struct virtio_fs *fs = vdev->priv;

	mutex_lock(&virtio_fs_mutex);
	/* This device is going away. No one should get new reference */
	list_del_init(&fs->list);
	virtio_fs_stop_all_queues(fs);
	virtio_fs_drain_all_queues_locked(fs);
	virtio_reset_device(vdev);
	virtio_fs_cleanup_vqs(vdev);

	vdev->priv = NULL;
	/* Put device reference on virtio_fs object */
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
	/* TODO need to save state here */
	pr_warn("virtio-fs: suspend/resume not yet supported\n");
	return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
	/* TODO need to restore state here */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
	{},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= feature_table,
	.feature_table_size	= ARRAY_SIZE(feature_table),
	.probe			= virtio_fs_probe,
	.remove			= virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze			= virtio_fs_freeze,
	.restore		= virtio_fs_restore,
#endif
};

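/* Send a FUSE_FORGET to the hiprio queue; called with fiq->lock held, released on return */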
static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	struct fuse_forget_link *link;
	struct virtio_fs_forget *forget;
	struct virtio_fs_forget_req *req;
	struct virtio_fs *fs;
	struct virtio_fs_vq *fsvq;
	u64 unique;

	link = fuse_dequeue_forget(fiq, 1, NULL);
	unique = fuse_get_unique(fiq);

	fs = fiq->priv;
	fsvq = &fs->vqs[VQ_HIPRIO];
	spin_unlock(&fiq->lock);

	/* Allocate a buffer for the request */
	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
	req = &forget->req;

	req->ih = (struct fuse_in_header){
		.opcode = FUSE_FORGET,
		.nodeid = link->forget_one.nodeid,
		.unique = unique,
		.len = sizeof(*req),
	};
	req->arg = (struct fuse_forget_in){
		.nlookup = link->forget_one.nlookup,
	};

	send_forget_request(fsvq, forget, false);
	kfree(link);
}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	/*
	 * TODO interrupts.
	 *
	 * Normal fs operations on a local filesystems aren't interruptible.
	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
	 * with shared lock between host and guest.
	 */
	spin_unlock(&fiq->lock);
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
					unsigned int num_pages,
					unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		this_len = min(page_descs[i].length, total_len);
		total_len -= this_len;
	}

	return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
	unsigned int size, total_sgs = 1 /* fuse_in_header */;

	if (args->in_numargs - args->in_pages)
		total_sgs += 1;

	if (args->in_pages) {
		size = args->in_args[args->in_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	if (!test_bit(FR_ISREPLY, &req->flags))
		return total_sgs;

	total_sgs += 1 /* fuse_out_header */;

	if (args->out_numargs - args->out_pages)
		total_sgs += 1;

	if (args->out_pages) {
		size = args->out_args[args->out_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	return total_sgs;
}

/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
				       struct page **pages,
				       struct fuse_page_desc *page_descs,
				       unsigned int num_pages,
				       unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		sg_init_table(&sg[i], 1);
		this_len = min(page_descs[i].length, total_len);
		sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
		total_len -= this_len;
	}

	return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
				      struct fuse_req *req,
				      struct fuse_arg *args,
				      unsigned int numargs,
				      bool argpages,
				      void *argbuf,
				      unsigned int *len_used)
{
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
	unsigned int total_sgs = 0;
	unsigned int len;

	len = fuse_len_args(numargs - argpages, args);
	if (len)
		sg_init_one(&sg[total_sgs++], argbuf, len);

	if (argpages)
		total_sgs += sg_init_fuse_pages(&sg[total_sgs],
						ap->pages, ap->descs,
						ap->num_pages,
						args[numargs - 1].size);

	if (len_used)
		*len_used = len;

	return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight)
{
	/* requests need at least 4 elements */
	struct scatterlist *stack_sgs[6];
	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
	struct scatterlist **sgs = stack_sgs;
	struct scatterlist *sg = stack_sg;
	struct virtqueue *vq;
	struct fuse_args *args = req->args;
	unsigned int argbuf_used = 0;
	unsigned int out_sgs = 0;
	unsigned int in_sgs = 0;
	unsigned int total_sgs;
	unsigned int i;
	int ret;
	bool notify;
	struct fuse_pqueue *fpq;

	/* Does the sglist fit on the stack? */
	total_sgs = sg_count_fuse_req(req);
	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
		sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
		if (!sgs || !sg) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Use a bounce buffer since stack args cannot be mapped */
	ret = copy_args_to_argbuf(req);
	if (ret < 0)
		goto out;

	/* Request elements */
	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
				     (struct fuse_arg *)args->in_args,
				     args->in_numargs, args->in_pages,
				     req->argbuf, &argbuf_used);

	/* Reply elements */
	if (test_bit(FR_ISREPLY, &req->flags)) {
		sg_init_one(&sg[out_sgs + in_sgs++],
			    &req->out.h, sizeof(req->out.h));
		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
					    args->out_args, args->out_numargs,
					    args->out_pages,
					    req->argbuf + argbuf_used, NULL);
	}

	WARN_ON(out_sgs + in_sgs != total_sgs);

	for (i = 0; i < total_sgs; i++)
		sgs[i] = &sg[i];

	spin_lock(&fsvq->lock);

	if (!fsvq->connected) {
		spin_unlock(&fsvq->lock);
		ret = -ENOTCONN;
		goto out;
	}

	vq = fsvq->vq;
	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock(&fsvq->lock);
		goto out;
	}

	/* Request successfully sent. */
	fpq = &fsvq->fud->pq;
	spin_lock(&fpq->lock);
	list_add_tail(&req->list, fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);

out:
	if (ret < 0 && req->argbuf) {
		kfree(req->argbuf);
		req->argbuf = NULL;
	}
	if (sgs != stack_sgs) {
		kfree(sgs);
		kfree(sg);
	}

	return ret;
}

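/* Submit the next pending request to the request queue; called with fiq->lock held, released on return */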
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
	struct virtio_fs *fs;
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	int ret;

	WARN_ON(list_empty(&fiq->pending));
	req = list_last_entry(&fiq->pending, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	WARN_ON(!list_empty(&fiq->pending));
	spin_unlock(&fiq->lock);

	fs = fiq->priv;

	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
		 __func__, req->in.h.opcode, req->in.h.unique,
		 req->in.h.nodeid, req->in.h.len,
		 fuse_len_args(req->args->out_numargs, req->args->out_args));

	fsvq = &fs->vqs[queue_id];
	ret = virtio_fs_enqueue_req(fsvq, req, false);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			/*
			 * Virtqueue full. Retry submission from worker
			 * context as we might be holding fc->bg_lock.
			 */
			spin_lock(&fsvq->lock);
			list_add_tail(&req->list, &fsvq->queued_reqs);
			inc_in_flight_req(fsvq);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			spin_unlock(&fsvq->lock);
			return;
		}
		req->out.h.error = ret;
		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

		/* Can't end request in submission context. Use a worker */
		spin_lock(&fsvq->lock);
		list_add_tail(&req->list, &fsvq->end_reqs);
		schedule_delayed_work(&fsvq->dispatch_work, 0);
		spin_unlock(&fsvq->lock);
		return;
	}
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
	.wake_forget_and_unlock		= virtio_fs_wake_forget_and_unlock,
	.wake_interrupt_and_unlock	= virtio_fs_wake_interrupt_and_unlock,
	.wake_pending_and_unlock	= virtio_fs_wake_pending_and_unlock,
	.release			= virtio_fs_fiq_release,
};

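/* Fill in the mount defaults that virtiofs always uses */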
static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
	ctx->rootmode = S_IFDIR;
	ctx->default_permissions = 1;
	ctx->allow_other = 1;
	ctx->max_read = UINT_MAX;
	ctx->blksize = 512;
	ctx->destroy = true;
	ctx->no_control = true;
	ctx->no_force_umount = true;
}

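/* Set up the superblock: allocate and install a fuse_dev per queue, then send FUSE_INIT */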
static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *fs = fc->iq.priv;
	struct fuse_fs_context *ctx = fsc->fs_private;
	unsigned int i;
	int err;

	virtio_fs_ctx_set_defaults(ctx);
	mutex_lock(&virtio_fs_mutex);

	/* After holding mutex, make sure virtiofs device is still there.
	 * Though we are holding a reference to it, driver ->remove might
	 * still have cleaned up virtual queues. In that case bail out.
	 */
	err = -EINVAL;
	if (list_empty(&fs->list)) {
		pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
		goto err;
	}

	err = -ENOMEM;
	/* Allocate a fuse_dev for the hiprio and request queues */
	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fsvq->fud = fuse_dev_alloc();
		if (!fsvq->fud)
			goto err_free_fuse_devs;
	}

	/* virtiofs allocates and installs its own fuse devices */
	ctx->fudptr = NULL;
	if (ctx->dax_mode != FUSE_DAX_NEVER) {
		if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) {
			err = -EINVAL;
			pr_err("virtio-fs: dax can't be enabled as filesystem"
			       " device does not support it.\n");
			goto err_free_fuse_devs;
		}
		ctx->dax_dev = fs->dax_dev;
	}
	err = fuse_fill_super_common(sb, ctx);
	if (err < 0)
		goto err_free_fuse_devs;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fuse_dev_install(fsvq->fud, fc);
	}

	/* Previous unmount will stop all queues. Start these again */
	virtio_fs_start_all_queues(fs);
	fuse_send_init(fm);
	mutex_unlock(&virtio_fs_mutex);
	return 0;

err_free_fuse_devs:
	virtio_fs_free_devs(fs);
err:
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

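/* Tear down the connection: stop the hiprio queue, send the destroy request,
 * then drain all queues and free the fuse devices.
 */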
static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *vfs = fc->iq.priv;
	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop dax worker. Soon evict_inodes() will be called which
	 * will free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop forget queue. Soon destroy will be sent */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;
	spin_unlock(&fsvq->lock);
	virtio_fs_drain_all_queues(vfs);

	fuse_conn_destroy(fm);

	/* fuse_conn_destroy() must have sent destroy. Stop all queues
	 * and drain one more time and free fuse devices. Freeing fuse
	 * devices will drop their reference on fuse_conn and that in
	 * turn will drop its reference on virtio_fs object.
	 */
	virtio_fs_stop_all_queues(vfs);
	virtio_fs_drain_all_queues(vfs);
	virtio_fs_free_devs(vfs);
}

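/* Kill the superblock; destroy the connection if this was its last mount */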
static void virtio_kill_sb(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	/* If mount failed, we can still be called without any fc */
	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			virtio_fs_conn_destroy(fm);
	}
	kill_anon_super(sb);
	fuse_mount_destroy(fm);
}

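/* Superblocks match when they refer to the same virtio_fs instance */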
static int virtio_fs_test_super(struct super_block *sb,
				struct fs_context *fsc)
{
	struct fuse_mount *fsc_fm = fsc->s_fs_info;
	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

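/* Look up the instance by tag, create the fuse_conn/fuse_mount and obtain the superblock */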
static int virtio_fs_get_tree(struct fs_context *fsc)
{
	struct virtio_fs *fs;
	struct super_block *sb;
	struct fuse_conn *fc = NULL;
	struct fuse_mount *fm;
	unsigned int virtqueue_size;
	int err = -EIO;

	/* This gets a reference on virtio_fs object. This ptr gets installed
	 * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
	 * to drop the reference to this object.
	 */
	fs = virtio_fs_find_instance(fsc->source);
	if (!fs) {
		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
		return -EINVAL;
	}

	virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
	if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
		goto out_err;

	err = -ENOMEM;
	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
	if (!fc)
		goto out_err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		goto out_err;

	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
	fc->release = fuse_free_conn;
	fc->delete_stale = true;
	fc->auto_submounts = true;
	fc->sync_fs = true;

	/* Tell FUSE to split requests that exceed the virtqueue's size */
	fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
				    virtqueue_size - FUSE_HEADER_OVERHEAD);

	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = virtio_fs_fill_super(sb, fsc);
		if (err) {
			deactivate_locked_super(sb);
			return err;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	WARN_ON(fsc->root);
	fsc->root = dget(sb->s_root);
	return 0;

out_err:
	kfree(fc);
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

static const struct fs_context_operations virtio_fs_context_ops = {
	.free		= virtio_fs_free_fsc,
	.parse_param	= virtio_fs_parse_param,
	.get_tree	= virtio_fs_get_tree,
};

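/* Allocate the per-mount parsing context and install the virtiofs fs_context operations */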
static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	if (fsc->purpose == FS_CONTEXT_FOR_SUBMOUNT)
		return fuse_init_fs_context_submount(fsc);

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	fsc->fs_private = ctx;
	fsc->ops = &virtio_fs_context_ops;
	return 0;
}

static struct file_system_type virtio_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "virtiofs",
	.init_fs_context = virtio_fs_init_fs_context,
	.kill_sb	= virtio_kill_sb,
};

static int __init virtio_fs_init(void)
{
	int ret;

	ret = register_virtio_driver(&virtio_fs_driver);
	if (ret < 0)
		return ret;

	ret = register_filesystem(&virtio_fs_type);
	if (ret < 0) {
		unregister_virtio_driver(&virtio_fs_driver);
		return ret;
	}

	return 0;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
	unregister_filesystem(&virtio_fs_type);
	unregister_virtio_driver(&virtio_fs_driver);
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);