Lines Matching +full:fiq +full:- +full:device

1 // SPDX-License-Identifier: GPL-2.0
3 * virtio-fs: Virtio Filesystem
28 /* List of virtio-fs device instances and a lock for the list. Also provides
29 * mutual exclusion in device removal and mounting path
41 /* Per-virtqueue state */
44 struct virtqueue *vq; /* protected by ->lock */
56 /* A virtio-fs device instance */
114 struct fuse_fs_context *ctx = fsc->fs_private; in virtio_fs_parse_param()
123 ctx->dax_mode = FUSE_DAX_ALWAYS; in virtio_fs_parse_param()
126 ctx->dax_mode = result.uint_32; in virtio_fs_parse_param()
129 return -EINVAL; in virtio_fs_parse_param()
137 struct fuse_fs_context *ctx = fsc->fs_private; in virtio_fs_free_fsc()
144 struct virtio_fs *fs = vq->vdev->priv; in vq_to_fsvq()
146 return &fs->vqs[vq->index]; in vq_to_fsvq()
149 /* Should be called with fsvq->lock held. */
152 fsvq->in_flight++; in inc_in_flight_req()
155 /* Should be called with fsvq->lock held. */
158 WARN_ON(fsvq->in_flight <= 0); in dec_in_flight_req()
159 fsvq->in_flight--; in dec_in_flight_req()
160 if (!fsvq->in_flight) in dec_in_flight_req()
161 complete(&fsvq->in_flight_zero); in dec_in_flight_req()
168 kfree(vfs->vqs); in release_virtio_fs_obj()
175 kref_put(&fs->refcount, release_virtio_fs_obj); in virtio_fs_put()
178 static void virtio_fs_fiq_release(struct fuse_iqueue *fiq) in virtio_fs_fiq_release() argument
180 struct virtio_fs *vfs = fiq->priv; in virtio_fs_fiq_release()
189 WARN_ON(fsvq->in_flight < 0); in virtio_fs_drain_queue()
192 spin_lock(&fsvq->lock); in virtio_fs_drain_queue()
193 if (fsvq->in_flight) { in virtio_fs_drain_queue()
197 reinit_completion(&fsvq->in_flight_zero); in virtio_fs_drain_queue()
198 spin_unlock(&fsvq->lock); in virtio_fs_drain_queue()
199 wait_for_completion(&fsvq->in_flight_zero); in virtio_fs_drain_queue()
201 spin_unlock(&fsvq->lock); in virtio_fs_drain_queue()
204 flush_work(&fsvq->done_work); in virtio_fs_drain_queue()
205 flush_delayed_work(&fsvq->dispatch_work); in virtio_fs_drain_queue()
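The drain logic above pairs a lock-protected in_flight counter with a completion: dec_in_flight_req() fires in_flight_zero when the count reaches zero, and virtio_fs_drain_queue() re-initializes the completion and waits on it while requests remain in flight. A minimal userspace sketch of the same drain-until-zero pattern, using a pthread mutex and condition variable in place of the spinlock/completion pair (all names below are illustrative, not from the driver):

    #include <pthread.h>

    struct drain_queue {
        pthread_mutex_t lock;       /* initialise with pthread_mutex_init() */
        pthread_cond_t  zero;       /* signalled when in_flight drops to 0 */
        int             in_flight;
    };

    /* Call with q->lock held, mirroring inc_in_flight_req(). */
    static void dq_inc(struct drain_queue *q)
    {
        q->in_flight++;
    }

    /* Call with q->lock held, mirroring dec_in_flight_req(). */
    static void dq_dec(struct drain_queue *q)
    {
        q->in_flight--;
        if (q->in_flight == 0)
            pthread_cond_broadcast(&q->zero);
    }

    /* Block until every in-flight request has completed, like virtio_fs_drain_queue(). */
    static void dq_drain(struct drain_queue *q)
    {
        pthread_mutex_lock(&q->lock);
        while (q->in_flight > 0)
            pthread_cond_wait(&q->zero, &q->lock);
        pthread_mutex_unlock(&q->lock);
    }
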
213 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_drain_all_queues_locked()
214 fsvq = &fs->vqs[i]; in virtio_fs_drain_all_queues_locked()
221 /* Provides mutual exclusion between ->remove and ->kill_sb in virtio_fs_drain_all_queues()
237 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_start_all_queues()
238 fsvq = &fs->vqs[i]; in virtio_fs_start_all_queues()
239 spin_lock(&fsvq->lock); in virtio_fs_start_all_queues()
240 fsvq->connected = true; in virtio_fs_start_all_queues()
241 spin_unlock(&fsvq->lock); in virtio_fs_start_all_queues()
245 /* Add a new instance to the list or return -EEXIST if tag name exists */
254 if (strcmp(fs->tag, fs2->tag) == 0) in virtio_fs_add_instance()
259 list_add_tail(&fs->list, &virtio_fs_instances); in virtio_fs_add_instance()
264 return -EEXIST; in virtio_fs_add_instance()
276 if (strcmp(fs->tag, tag) == 0) { in virtio_fs_find_instance()
277 kref_get(&fs->refcount); in virtio_fs_find_instance()
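virtio_fs_add_instance() and virtio_fs_find_instance() maintain a global, lock-protected list of filesystem instances keyed by tag: duplicates are rejected with -EEXIST, and a successful lookup takes a reference before returning. A hedged userspace sketch of that add/find-by-tag pattern (the struct and helpers are invented for illustration; locking is assumed to be handled by the caller, as virtio_fs_mutex does in the driver):

    #include <errno.h>
    #include <string.h>

    struct fs_instance {
        char *tag;
        int refcount;
        struct fs_instance *next;
    };

    static struct fs_instance *instances;   /* caller serializes access */

    /* Add a new instance, or fail with -EEXIST if the tag is already registered. */
    static int instance_add(struct fs_instance *fs)
    {
        struct fs_instance *it;

        for (it = instances; it; it = it->next)
            if (strcmp(it->tag, fs->tag) == 0)
                return -EEXIST;

        fs->next = instances;
        instances = fs;
        return 0;
    }

    /* Look up an instance by tag and take a reference, as kref_get() does in the driver. */
    static struct fs_instance *instance_find(const char *tag)
    {
        struct fs_instance *it;

        for (it = instances; it; it = it->next) {
            if (strcmp(it->tag, tag) == 0) {
                it->refcount++;
                return it;
            }
        }
        return NULL;
    }
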
294 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_free_devs()
295 struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_free_devs()
297 if (!fsvq->fud) in virtio_fs_free_devs()
300 fuse_dev_free(fsvq->fud); in virtio_fs_free_devs()
301 fsvq->fud = NULL; in virtio_fs_free_devs()
305 /* Read filesystem name from virtio config into fs->tag (must kfree()). */
316 return -EINVAL; /* empty tag */ in virtio_fs_read_tag()
320 len = end - tag_buf; in virtio_fs_read_tag()
321 fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL); in virtio_fs_read_tag()
322 if (!fs->tag) in virtio_fs_read_tag()
323 return -ENOMEM; in virtio_fs_read_tag()
324 memcpy(fs->tag, tag_buf, len); in virtio_fs_read_tag()
325 fs->tag[len] = '\0'; in virtio_fs_read_tag()
328 * awkward on mount(8) command-lines and cause problems in the sysfs in virtio_fs_read_tag()
331 if (strchr(fs->tag, '\n')) { in virtio_fs_read_tag()
332 dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n"); in virtio_fs_read_tag()
333 return -EINVAL; in virtio_fs_read_tag()
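virtio_fs_read_tag() copies the fixed-size tag field out of the virtio config space, NUL-terminates it, rejects an empty tag, and refuses tags containing a newline because they would be awkward on mount(8) command lines and in sysfs paths. A small sketch of the same validation, assuming the raw tag arrives as a fixed-size field that may not be NUL-terminated (the 36-byte size and function name are illustrative):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    #define TAG_FIELD_LEN 36   /* illustrative fixed-size config field */

    /* Copy a possibly unterminated tag field into a fresh NUL-terminated string. */
    static int read_tag(const char raw[TAG_FIELD_LEN], char **out)
    {
        const char *end = memchr(raw, '\0', TAG_FIELD_LEN);
        size_t len = end ? (size_t)(end - raw) : TAG_FIELD_LEN;
        char *tag;

        if (len == 0)
            return -EINVAL;             /* empty tag */

        tag = malloc(len + 1);
        if (!tag)
            return -ENOMEM;
        memcpy(tag, raw, len);
        tag[len] = '\0';

        if (strchr(tag, '\n')) {        /* would break mount(8) and sysfs paths */
            free(tag);
            return -EINVAL;
        }

        *out = tag;
        return 0;
    }
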
344 struct virtqueue *vq = fsvq->vq; in virtio_fs_hiprio_done_work()
347 spin_lock(&fsvq->lock); in virtio_fs_hiprio_done_work()
359 spin_unlock(&fsvq->lock); in virtio_fs_hiprio_done_work()
369 pr_debug("virtio-fs: worker %s called.\n", __func__); in virtio_fs_request_dispatch_work()
371 spin_lock(&fsvq->lock); in virtio_fs_request_dispatch_work()
372 req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req, in virtio_fs_request_dispatch_work()
375 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
379 list_del_init(&req->list); in virtio_fs_request_dispatch_work()
380 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
386 spin_lock(&fsvq->lock); in virtio_fs_request_dispatch_work()
387 req = list_first_entry_or_null(&fsvq->queued_reqs, in virtio_fs_request_dispatch_work()
390 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
393 list_del_init(&req->list); in virtio_fs_request_dispatch_work()
394 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
398 if (ret == -ENOMEM || ret == -ENOSPC) { in virtio_fs_request_dispatch_work()
399 spin_lock(&fsvq->lock); in virtio_fs_request_dispatch_work()
400 list_add_tail(&req->list, &fsvq->queued_reqs); in virtio_fs_request_dispatch_work()
401 schedule_delayed_work(&fsvq->dispatch_work, in virtio_fs_request_dispatch_work()
403 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
406 req->out.h.error = ret; in virtio_fs_request_dispatch_work()
407 spin_lock(&fsvq->lock); in virtio_fs_request_dispatch_work()
409 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
410 pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", in virtio_fs_request_dispatch_work()
429 struct virtio_fs_forget_req *req = &forget->req; in send_forget_request()
431 spin_lock(&fsvq->lock); in send_forget_request()
432 if (!fsvq->connected) { in send_forget_request()
440 vq = fsvq->vq; in send_forget_request()
441 dev_dbg(&vq->vdev->dev, "%s\n", __func__); in send_forget_request()
445 if (ret == -ENOMEM || ret == -ENOSPC) { in send_forget_request()
446 pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n", in send_forget_request()
448 list_add_tail(&forget->list, &fsvq->queued_reqs); in send_forget_request()
449 schedule_delayed_work(&fsvq->dispatch_work, in send_forget_request()
456 pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n", in send_forget_request()
468 spin_unlock(&fsvq->lock); in send_forget_request()
474 spin_unlock(&fsvq->lock); in send_forget_request()
483 pr_debug("virtio-fs: worker %s called.\n", __func__); in virtio_fs_hiprio_dispatch_work()
485 spin_lock(&fsvq->lock); in virtio_fs_hiprio_dispatch_work()
486 forget = list_first_entry_or_null(&fsvq->queued_reqs, in virtio_fs_hiprio_dispatch_work()
489 spin_unlock(&fsvq->lock); in virtio_fs_hiprio_dispatch_work()
493 list_del(&forget->list); in virtio_fs_hiprio_dispatch_work()
494 spin_unlock(&fsvq->lock); in virtio_fs_hiprio_dispatch_work()
500 /* Allocate and copy args into req->argbuf */
503 struct fuse_args *args = req->args; in copy_args_to_argbuf()
510 num_in = args->in_numargs - args->in_pages; in copy_args_to_argbuf()
511 num_out = args->out_numargs - args->out_pages; in copy_args_to_argbuf()
512 len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) + in copy_args_to_argbuf()
513 fuse_len_args(num_out, args->out_args); in copy_args_to_argbuf()
515 req->argbuf = kmalloc(len, GFP_ATOMIC); in copy_args_to_argbuf()
516 if (!req->argbuf) in copy_args_to_argbuf()
517 return -ENOMEM; in copy_args_to_argbuf()
520 memcpy(req->argbuf + offset, in copy_args_to_argbuf()
521 args->in_args[i].value, in copy_args_to_argbuf()
522 args->in_args[i].size); in copy_args_to_argbuf()
523 offset += args->in_args[i].size; in copy_args_to_argbuf()
529 /* Copy args out of and free req->argbuf */
538 remaining = req->out.h.len - sizeof(req->out.h); in copy_args_from_argbuf()
539 num_in = args->in_numargs - args->in_pages; in copy_args_from_argbuf()
540 num_out = args->out_numargs - args->out_pages; in copy_args_from_argbuf()
541 offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args); in copy_args_from_argbuf()
544 unsigned int argsize = args->out_args[i].size; in copy_args_from_argbuf()
546 if (args->out_argvar && in copy_args_from_argbuf()
547 i == args->out_numargs - 1 && in copy_args_from_argbuf()
552 memcpy(args->out_args[i].value, req->argbuf + offset, argsize); in copy_args_from_argbuf()
555 if (i != args->out_numargs - 1) in copy_args_from_argbuf()
556 remaining -= argsize; in copy_args_from_argbuf()
559 /* Store the actual size of the variable-length arg */ in copy_args_from_argbuf()
560 if (args->out_argvar) in copy_args_from_argbuf()
561 args->out_args[args->out_numargs - 1].size = remaining; in copy_args_from_argbuf()
563 kfree(req->argbuf); in copy_args_from_argbuf()
564 req->argbuf = NULL; in copy_args_from_argbuf()
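copy_args_to_argbuf() packs every non-page input argument back to back into a single req->argbuf so the whole request needs only a couple of scatter-gather elements, and copy_args_from_argbuf() later unpacks the output arguments from the same buffer, letting a variable-length final argument take whatever bytes remain in the reply. A simplified userspace sketch of that pack/unpack scheme (the struct layout and helpers are invented; the real code also handles page-backed args separately):

    #include <stdlib.h>
    #include <string.h>

    struct arg {
        void  *value;
        size_t size;
    };

    /* Pack the input args back to back into one freshly allocated buffer. */
    static void *pack_args(const struct arg *in, unsigned num_in,
                           size_t out_reserve, size_t *in_len)
    {
        size_t len = 0, off = 0;
        unsigned i;
        char *buf;

        for (i = 0; i < num_in; i++)
            len += in[i].size;
        *in_len = len;

        buf = malloc(len + out_reserve);    /* space for output args follows the input */
        if (!buf)
            return NULL;

        for (i = 0; i < num_in; i++) {
            memcpy(buf + off, in[i].value, in[i].size);
            off += in[i].size;
        }
        return buf;
    }

    /* Unpack output args; the last one may be shorter than its declared size. */
    static void unpack_args(const char *outbuf, struct arg *out, unsigned num_out,
                            size_t remaining)
    {
        size_t off = 0;
        unsigned i;

        for (i = 0; i < num_out; i++) {
            size_t n = out[i].size;

            if (i == num_out - 1 && remaining < n)
                n = remaining;              /* truncated variable-length tail */
            memcpy(out[i].value, outbuf + off, n);
            off += n;
            remaining -= n;
        }
    }
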
571 struct fuse_pqueue *fpq = &fsvq->fud->pq; in virtio_fs_request_complete()
581 args = req->args; in virtio_fs_request_complete()
584 if (args->out_pages && args->page_zeroing) { in virtio_fs_request_complete()
585 len = args->out_args[args->out_numargs - 1].size; in virtio_fs_request_complete()
587 for (i = 0; i < ap->num_pages; i++) { in virtio_fs_request_complete()
588 thislen = ap->descs[i].length; in virtio_fs_request_complete()
590 WARN_ON(ap->descs[i].offset); in virtio_fs_request_complete()
591 page = ap->pages[i]; in virtio_fs_request_complete()
595 len -= thislen; in virtio_fs_request_complete()
600 spin_lock(&fpq->lock); in virtio_fs_request_complete()
601 clear_bit(FR_SENT, &req->flags); in virtio_fs_request_complete()
602 spin_unlock(&fpq->lock); in virtio_fs_request_complete()
605 spin_lock(&fsvq->lock); in virtio_fs_request_complete()
607 spin_unlock(&fsvq->lock); in virtio_fs_request_complete()
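When the device returns less data than the request asked for, virtio_fs_request_complete() zeroes the unused tail of each destination page so stale page contents are never exposed to the reader. A userspace sketch of the same tail-zeroing across a list of destination segments (the segment struct is illustrative):

    #include <stddef.h>
    #include <string.h>

    struct segment {
        char  *buf;
        size_t len;
    };

    /* Zero every byte past 'returned' across the destination segments. */
    static void zero_short_read(struct segment *segs, size_t nsegs, size_t returned)
    {
        size_t i;

        for (i = 0; i < nsegs; i++) {
            size_t thislen = segs[i].len;

            if (returned < thislen) {
                memset(segs[i].buf + returned, 0, thislen - returned);
                returned = 0;
            } else {
                returned -= thislen;
            }
        }
    }
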
615 virtio_fs_request_complete(w->req, w->fsvq); in virtio_fs_complete_req_work()
623 struct fuse_pqueue *fpq = &fsvq->fud->pq; in virtio_fs_requests_done_work()
624 struct virtqueue *vq = fsvq->vq; in virtio_fs_requests_done_work()
631 spin_lock(&fsvq->lock); in virtio_fs_requests_done_work()
636 spin_lock(&fpq->lock); in virtio_fs_requests_done_work()
637 list_move_tail(&req->list, &reqs); in virtio_fs_requests_done_work()
638 spin_unlock(&fpq->lock); in virtio_fs_requests_done_work()
641 spin_unlock(&fsvq->lock); in virtio_fs_requests_done_work()
645 list_del_init(&req->list); in virtio_fs_requests_done_work()
648 if (req->args->may_block) { in virtio_fs_requests_done_work()
652 INIT_WORK(&w->done_work, virtio_fs_complete_req_work); in virtio_fs_requests_done_work()
653 w->fsvq = fsvq; in virtio_fs_requests_done_work()
654 w->req = req; in virtio_fs_requests_done_work()
655 schedule_work(&w->done_work); in virtio_fs_requests_done_work()
667 dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name); in virtio_fs_vq_done()
669 schedule_work(&fsvq->done_work); in virtio_fs_vq_done()
675 strscpy(fsvq->name, name, VQ_NAME_LEN); in virtio_fs_init_vq()
676 spin_lock_init(&fsvq->lock); in virtio_fs_init_vq()
677 INIT_LIST_HEAD(&fsvq->queued_reqs); in virtio_fs_init_vq()
678 INIT_LIST_HEAD(&fsvq->end_reqs); in virtio_fs_init_vq()
679 init_completion(&fsvq->in_flight_zero); in virtio_fs_init_vq()
682 INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work); in virtio_fs_init_vq()
683 INIT_DELAYED_WORK(&fsvq->dispatch_work, in virtio_fs_init_vq()
686 INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work); in virtio_fs_init_vq()
687 INIT_DELAYED_WORK(&fsvq->dispatch_work, in virtio_fs_init_vq()
703 &fs->num_request_queues); in virtio_fs_setup_vqs()
704 if (fs->num_request_queues == 0) in virtio_fs_setup_vqs()
705 return -EINVAL; in virtio_fs_setup_vqs()
707 fs->nvqs = VQ_REQUEST + fs->num_request_queues; in virtio_fs_setup_vqs()
708 fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
709 if (!fs->vqs) in virtio_fs_setup_vqs()
710 return -ENOMEM; in virtio_fs_setup_vqs()
712 vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
713 callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]), in virtio_fs_setup_vqs()
715 names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
717 ret = -ENOMEM; in virtio_fs_setup_vqs()
723 virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO); in virtio_fs_setup_vqs()
724 names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name; in virtio_fs_setup_vqs()
727 for (i = VQ_REQUEST; i < fs->nvqs; i++) { in virtio_fs_setup_vqs()
730 snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST); in virtio_fs_setup_vqs()
731 virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST); in virtio_fs_setup_vqs()
733 names[i] = fs->vqs[i].name; in virtio_fs_setup_vqs()
736 ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL); in virtio_fs_setup_vqs()
740 for (i = 0; i < fs->nvqs; i++) in virtio_fs_setup_vqs()
741 fs->vqs[i].vq = vqs[i]; in virtio_fs_setup_vqs()
749 kfree(fs->vqs); in virtio_fs_setup_vqs()
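virtio_fs_setup_vqs() reads num_request_queues from the device config and lays the virtqueues out as one hiprio queue followed by request queues named "requests.0", "requests.1", and so on. A small sketch of just that name-table construction (the constants mirror the layout above; the helper itself is invented):

    #include <stdio.h>

    #define VQ_HIPRIO   0
    #define VQ_REQUEST  1
    #define VQ_NAME_LEN 24

    /* Fill the per-virtqueue name table: "hiprio", then "requests.0", "requests.1", ... */
    static int build_vq_names(char names[][VQ_NAME_LEN], unsigned num_request_queues)
    {
        unsigned nvqs = VQ_REQUEST + num_request_queues;
        unsigned i;

        snprintf(names[VQ_HIPRIO], VQ_NAME_LEN, "hiprio");
        for (i = VQ_REQUEST; i < nvqs; i++)
            snprintf(names[i], VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);

        return nvqs;        /* caller must have allocated at least this many entries */
    }
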
753 /* Free virtqueues (device must already be reset) */
756 vdev->config->del_vqs(vdev); in virtio_fs_cleanup_vqs()
769 size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff; in virtio_fs_direct_access()
772 *kaddr = fs->window_kaddr + offset; in virtio_fs_direct_access()
774 *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, in virtio_fs_direct_access()
821 dev_notice(&vdev->dev, "%s: No cache capability\n", __func__); in virtio_fs_setup_dax()
825 if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len, in virtio_fs_setup_dax()
826 dev_name(&vdev->dev))) { in virtio_fs_setup_dax()
827 dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n", in virtio_fs_setup_dax()
829 return -EBUSY; in virtio_fs_setup_dax()
832 dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len, in virtio_fs_setup_dax()
835 pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL); in virtio_fs_setup_dax()
837 return -ENOMEM; in virtio_fs_setup_dax()
839 pgmap->type = MEMORY_DEVICE_FS_DAX; in virtio_fs_setup_dax()
846 pgmap->range = (struct range) { in virtio_fs_setup_dax()
848 .end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1, in virtio_fs_setup_dax()
850 pgmap->nr_range = 1; in virtio_fs_setup_dax()
852 fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap); in virtio_fs_setup_dax()
853 if (IS_ERR(fs->window_kaddr)) in virtio_fs_setup_dax()
854 return PTR_ERR(fs->window_kaddr); in virtio_fs_setup_dax()
856 fs->window_phys_addr = (phys_addr_t) cache_reg.addr; in virtio_fs_setup_dax()
857 fs->window_len = (phys_addr_t) cache_reg.len; in virtio_fs_setup_dax()
859 dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n", in virtio_fs_setup_dax()
860 __func__, fs->window_kaddr, cache_reg.addr, cache_reg.len); in virtio_fs_setup_dax()
862 fs->dax_dev = alloc_dax(fs, &virtio_fs_dax_ops); in virtio_fs_setup_dax()
863 if (IS_ERR(fs->dax_dev)) in virtio_fs_setup_dax()
864 return PTR_ERR(fs->dax_dev); in virtio_fs_setup_dax()
866 return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax, in virtio_fs_setup_dax()
867 fs->dax_dev); in virtio_fs_setup_dax()
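The DAX setup above maps the device's shared cache window once with devm_memremap_pages(); virtio_fs_direct_access() then serves requests by plain offset arithmetic into that window, clamping each access to the pages left before the window ends. A sketch of that clamp and address calculation (field and function names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL

    struct dax_window {
        void     *kaddr;      /* CPU mapping of the cache window */
        uint64_t  phys_addr;  /* physical base of the window */
        uint64_t  len;        /* window length in bytes */
    };

    /* Translate (pgoff, nr_pages) into an address inside the window, clamped to its end. */
    static long window_direct_access(const struct dax_window *w, unsigned long pgoff,
                                     long nr_pages, void **kaddr, uint64_t *phys)
    {
        uint64_t offset = (uint64_t)pgoff * PAGE_SIZE;
        long max_nr_pages;

        if (offset >= w->len)
            return 0;                               /* past the end of the window */

        max_nr_pages = (long)((w->len - offset) / PAGE_SIZE);
        if (kaddr)
            *kaddr = (char *)w->kaddr + offset;
        if (phys)
            *phys = w->phys_addr + offset;

        return nr_pages < max_nr_pages ? nr_pages : max_nr_pages;
    }
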
877 return -ENOMEM; in virtio_fs_probe()
878 kref_init(&fs->refcount); in virtio_fs_probe()
879 vdev->priv = fs; in virtio_fs_probe()
895 /* Bring the device online in case the filesystem is mounted and in virtio_fs_probe()

909 kfree(fs->vqs); in virtio_fs_probe()
912 vdev->priv = NULL; in virtio_fs_probe()
922 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_stop_all_queues()
923 fsvq = &fs->vqs[i]; in virtio_fs_stop_all_queues()
924 spin_lock(&fsvq->lock); in virtio_fs_stop_all_queues()
925 fsvq->connected = false; in virtio_fs_stop_all_queues()
926 spin_unlock(&fsvq->lock); in virtio_fs_stop_all_queues()
932 struct virtio_fs *fs = vdev->priv; in virtio_fs_remove()
935 /* This device is going away. No one should get a new reference */ in virtio_fs_remove()
936 list_del_init(&fs->list); in virtio_fs_remove()
942 vdev->priv = NULL; in virtio_fs_remove()
943 /* Put device reference on virtio_fs object */ in virtio_fs_remove()
952 pr_warn("virtio-fs: suspend/resume not yet supported\n"); in virtio_fs_freeze()
953 return -EOPNOTSUPP; in virtio_fs_freeze()
984 static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq) in virtio_fs_wake_forget_and_unlock() argument
985 __releases(fiq->lock) in virtio_fs_wake_forget_and_unlock()
994 link = fuse_dequeue_forget(fiq, 1, NULL); in virtio_fs_wake_forget_and_unlock()
995 unique = fuse_get_unique(fiq); in virtio_fs_wake_forget_and_unlock()
997 fs = fiq->priv; in virtio_fs_wake_forget_and_unlock()
998 fsvq = &fs->vqs[VQ_HIPRIO]; in virtio_fs_wake_forget_and_unlock()
999 spin_unlock(&fiq->lock); in virtio_fs_wake_forget_and_unlock()
1003 req = &forget->req; in virtio_fs_wake_forget_and_unlock()
1005 req->ih = (struct fuse_in_header){ in virtio_fs_wake_forget_and_unlock()
1007 .nodeid = link->forget_one.nodeid, in virtio_fs_wake_forget_and_unlock()
1011 req->arg = (struct fuse_forget_in){ in virtio_fs_wake_forget_and_unlock()
1012 .nlookup = link->forget_one.nlookup, in virtio_fs_wake_forget_and_unlock()
1019 static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq) in virtio_fs_wake_interrupt_and_unlock() argument
1020 __releases(fiq->lock) in virtio_fs_wake_interrupt_and_unlock()
1029 spin_unlock(&fiq->lock); in virtio_fs_wake_interrupt_and_unlock()
1032 /* Count number of scatter-gather elements required */
1042 total_len -= this_len; in sg_count_fuse_pages()
1048 /* Return the number of scatter-gather list elements required */
1051 struct fuse_args *args = req->args; in sg_count_fuse_req()
1055 if (args->in_numargs - args->in_pages) in sg_count_fuse_req()
1058 if (args->in_pages) { in sg_count_fuse_req()
1059 size = args->in_args[args->in_numargs - 1].size; in sg_count_fuse_req()
1060 total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages, in sg_count_fuse_req()
1064 if (!test_bit(FR_ISREPLY, &req->flags)) in sg_count_fuse_req()
1069 if (args->out_numargs - args->out_pages) in sg_count_fuse_req()
1072 if (args->out_pages) { in sg_count_fuse_req()
1073 size = args->out_args[args->out_numargs - 1].size; in sg_count_fuse_req()
1074 total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages, in sg_count_fuse_req()
1081 /* Add pages to scatter-gather list and return number of elements used */
1095 total_len -= this_len; in sg_init_fuse_pages()
1101 /* Add args to scatter-gather list and return number of elements used */
1110 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args); in sg_init_fuse_args()
1114 len = fuse_len_args(numargs - argpages, args); in sg_init_fuse_args()
1120 ap->pages, ap->descs, in sg_init_fuse_args()
1121 ap->num_pages, in sg_init_fuse_args()
1122 args[numargs - 1].size); in sg_init_fuse_args()
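sg_count_fuse_req() predicts how many scatter-gather elements a request needs: one for the request header, one for the packed non-page input args (if any), one per page that actually carries input data, and the same again on the reply side when a reply is expected. The page count itself only covers pages that hold part of the transfer, as in the sketch below (the descriptor struct is illustrative):

    struct page_desc {
        unsigned int length;   /* bytes used in this page */
    };

    /* Count pages needed to cover total_len, mirroring sg_count_fuse_pages(). */
    static unsigned int count_pages(const struct page_desc *descs,
                                    unsigned int num_pages, unsigned int total_len)
    {
        unsigned int i;

        for (i = 0; i < num_pages && total_len; i++) {
            unsigned int this_len =
                descs[i].length < total_len ? descs[i].length : total_len;
            total_len -= this_len;
        }
        return i;   /* pages that actually carry data, one sg element each */
    }
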
1130 /* Add a request to a virtqueue and kick the device */
1140 struct fuse_args *args = req->args; in virtio_fs_enqueue_req()
1156 ret = -ENOMEM; in virtio_fs_enqueue_req()
1167 sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h)); in virtio_fs_enqueue_req()
1169 (struct fuse_arg *)args->in_args, in virtio_fs_enqueue_req()
1170 args->in_numargs, args->in_pages, in virtio_fs_enqueue_req()
1171 req->argbuf, &argbuf_used); in virtio_fs_enqueue_req()
1174 if (test_bit(FR_ISREPLY, &req->flags)) { in virtio_fs_enqueue_req()
1176 &req->out.h, sizeof(req->out.h)); in virtio_fs_enqueue_req()
1178 args->out_args, args->out_numargs, in virtio_fs_enqueue_req()
1179 args->out_pages, in virtio_fs_enqueue_req()
1180 req->argbuf + argbuf_used, NULL); in virtio_fs_enqueue_req()
1188 spin_lock(&fsvq->lock); in virtio_fs_enqueue_req()
1190 if (!fsvq->connected) { in virtio_fs_enqueue_req()
1191 spin_unlock(&fsvq->lock); in virtio_fs_enqueue_req()
1192 ret = -ENOTCONN; in virtio_fs_enqueue_req()
1196 vq = fsvq->vq; in virtio_fs_enqueue_req()
1199 spin_unlock(&fsvq->lock); in virtio_fs_enqueue_req()
1204 fpq = &fsvq->fud->pq; in virtio_fs_enqueue_req()
1205 spin_lock(&fpq->lock); in virtio_fs_enqueue_req()
1206 list_add_tail(&req->list, fpq->processing); in virtio_fs_enqueue_req()
1207 spin_unlock(&fpq->lock); in virtio_fs_enqueue_req()
1208 set_bit(FR_SENT, &req->flags); in virtio_fs_enqueue_req()
1216 spin_unlock(&fsvq->lock); in virtio_fs_enqueue_req()
1222 if (ret < 0 && req->argbuf) { in virtio_fs_enqueue_req()
1223 kfree(req->argbuf); in virtio_fs_enqueue_req()
1224 req->argbuf = NULL; in virtio_fs_enqueue_req()
1234 static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq) in virtio_fs_wake_pending_and_unlock() argument
1235 __releases(fiq->lock) in virtio_fs_wake_pending_and_unlock()
1243 WARN_ON(list_empty(&fiq->pending)); in virtio_fs_wake_pending_and_unlock()
1244 req = list_last_entry(&fiq->pending, struct fuse_req, list); in virtio_fs_wake_pending_and_unlock()
1245 clear_bit(FR_PENDING, &req->flags); in virtio_fs_wake_pending_and_unlock()
1246 list_del_init(&req->list); in virtio_fs_wake_pending_and_unlock()
1247 WARN_ON(!list_empty(&fiq->pending)); in virtio_fs_wake_pending_and_unlock()
1248 spin_unlock(&fiq->lock); in virtio_fs_wake_pending_and_unlock()
1250 fs = fiq->priv; in virtio_fs_wake_pending_and_unlock()
1253 __func__, req->in.h.opcode, req->in.h.unique, in virtio_fs_wake_pending_and_unlock()
1254 req->in.h.nodeid, req->in.h.len, in virtio_fs_wake_pending_and_unlock()
1255 fuse_len_args(req->args->out_numargs, req->args->out_args)); in virtio_fs_wake_pending_and_unlock()
1257 fsvq = &fs->vqs[queue_id]; in virtio_fs_wake_pending_and_unlock()
1260 if (ret == -ENOMEM || ret == -ENOSPC) { in virtio_fs_wake_pending_and_unlock()
1263 * context as we might be holding fc->bg_lock. in virtio_fs_wake_pending_and_unlock()
1265 spin_lock(&fsvq->lock); in virtio_fs_wake_pending_and_unlock()
1266 list_add_tail(&req->list, &fsvq->queued_reqs); in virtio_fs_wake_pending_and_unlock()
1268 schedule_delayed_work(&fsvq->dispatch_work, in virtio_fs_wake_pending_and_unlock()
1270 spin_unlock(&fsvq->lock); in virtio_fs_wake_pending_and_unlock()
1273 req->out.h.error = ret; in virtio_fs_wake_pending_and_unlock()
1274 pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret); in virtio_fs_wake_pending_and_unlock()
1277 spin_lock(&fsvq->lock); in virtio_fs_wake_pending_and_unlock()
1278 list_add_tail(&req->list, &fsvq->end_reqs); in virtio_fs_wake_pending_and_unlock()
1279 schedule_delayed_work(&fsvq->dispatch_work, 0); in virtio_fs_wake_pending_and_unlock()
1280 spin_unlock(&fsvq->lock); in virtio_fs_wake_pending_and_unlock()
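When virtio_fs_enqueue_req() fails with -ENOMEM or -ENOSPC, the request is not retried inline (the caller may be holding fc->bg_lock); it is parked on fsvq->queued_reqs and the delayed dispatch worker resubmits it later. A minimal self-contained sketch of that park-and-retry-from-a-worker pattern (the ring is simulated by a counter, and the parked list is LIFO for brevity where the driver keeps FIFO order):

    #include <errno.h>
    #include <stddef.h>

    #define RING_SIZE 8

    struct request { struct request *next; };

    struct submit_queue {
        unsigned int    in_ring;        /* descriptors currently in use */
        struct request *retry_head;     /* requests parked until space frees up */
    };

    /* Pretend ring: fails with -ENOSPC once all descriptors are in use. */
    static int ring_submit(struct submit_queue *q, struct request *req)
    {
        (void)req;
        if (q->in_ring == RING_SIZE)
            return -ENOSPC;
        q->in_ring++;
        return 0;
    }

    /* Submit, and on a full ring park the request for a later retry pass. */
    static int submit_or_park(struct submit_queue *q, struct request *req)
    {
        int ret = ring_submit(q, req);

        if (ret == -ENOSPC || ret == -ENOMEM) {
            req->next = q->retry_head;      /* the driver holds fsvq->lock here */
            q->retry_head = req;
            return 0;                       /* the dispatch worker will retry it */
        }
        return ret;
    }

    /* What the delayed dispatch worker does: drain the parked list while space lasts. */
    static void retry_parked(struct submit_queue *q)
    {
        while (q->retry_head) {
            struct request *req = q->retry_head;

            if (ring_submit(q, req) != 0)
                break;                      /* still full; try again next pass */
            q->retry_head = req->next;
        }
    }
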
1294 ctx->rootmode = S_IFDIR; in virtio_fs_ctx_set_defaults()
1295 ctx->default_permissions = 1; in virtio_fs_ctx_set_defaults()
1296 ctx->allow_other = 1; in virtio_fs_ctx_set_defaults()
1297 ctx->max_read = UINT_MAX; in virtio_fs_ctx_set_defaults()
1298 ctx->blksize = 512; in virtio_fs_ctx_set_defaults()
1299 ctx->destroy = true; in virtio_fs_ctx_set_defaults()
1300 ctx->no_control = true; in virtio_fs_ctx_set_defaults()
1301 ctx->no_force_umount = true; in virtio_fs_ctx_set_defaults()
1307 struct fuse_conn *fc = fm->fc; in virtio_fs_fill_super()
1308 struct virtio_fs *fs = fc->iq.priv; in virtio_fs_fill_super()
1309 struct fuse_fs_context *ctx = fsc->fs_private; in virtio_fs_fill_super()
1316 /* After holding mutex, make sure virtiofs device is still there. in virtio_fs_fill_super()
1317 * Though we are holding a reference to it, driver ->remove might in virtio_fs_fill_super()
1320 err = -EINVAL; in virtio_fs_fill_super()
1321 if (list_empty(&fs->list)) { in virtio_fs_fill_super()
1322 pr_info("virtio-fs: tag <%s> not found\n", fs->tag); in virtio_fs_fill_super()
1326 err = -ENOMEM; in virtio_fs_fill_super()
1328 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_fill_super()
1329 struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_fill_super()
1331 fsvq->fud = fuse_dev_alloc(); in virtio_fs_fill_super()
1332 if (!fsvq->fud) in virtio_fs_fill_super()
1337 ctx->fudptr = NULL; in virtio_fs_fill_super()
1338 if (ctx->dax_mode != FUSE_DAX_NEVER) { in virtio_fs_fill_super()
1339 if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) { in virtio_fs_fill_super()
1340 err = -EINVAL; in virtio_fs_fill_super()
1341 pr_err("virtio-fs: dax can't be enabled as filesystem" in virtio_fs_fill_super()
1342 " device does not support it.\n"); in virtio_fs_fill_super()
1345 ctx->dax_dev = fs->dax_dev; in virtio_fs_fill_super()
1351 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_fill_super()
1352 struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_fill_super()
1354 fuse_dev_install(fsvq->fud, fc); in virtio_fs_fill_super()
1372 struct fuse_conn *fc = fm->fc; in virtio_fs_conn_destroy()
1373 struct virtio_fs *vfs = fc->iq.priv; in virtio_fs_conn_destroy()
1374 struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO]; in virtio_fs_conn_destroy()
1383 spin_lock(&fsvq->lock); in virtio_fs_conn_destroy()
1384 fsvq->connected = false; in virtio_fs_conn_destroy()
1385 spin_unlock(&fsvq->lock); in virtio_fs_conn_destroy()
1406 if (sb->s_root) { in virtio_kill_sb()
1418 struct fuse_mount *fsc_fm = fsc->s_fs_info; in virtio_fs_test_super()
1421 return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv; in virtio_fs_test_super()
1431 int err = -EIO; in virtio_fs_get_tree()
1434 * in fc->iq->priv. Once fuse_conn is going away, it calls ->put() in virtio_fs_get_tree()
1437 fs = virtio_fs_find_instance(fsc->source); in virtio_fs_get_tree()
1439 pr_info("virtio-fs: tag <%s> not found\n", fsc->source); in virtio_fs_get_tree()
1440 return -EINVAL; in virtio_fs_get_tree()
1443 virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq); in virtio_fs_get_tree()
1447 err = -ENOMEM; in virtio_fs_get_tree()
1456 fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs); in virtio_fs_get_tree()
1457 fc->release = fuse_free_conn; in virtio_fs_get_tree()
1458 fc->delete_stale = true; in virtio_fs_get_tree()
1459 fc->auto_submounts = true; in virtio_fs_get_tree()
1460 fc->sync_fs = true; in virtio_fs_get_tree()
1463 fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit, in virtio_fs_get_tree()
1464 virtqueue_size - FUSE_HEADER_OVERHEAD); in virtio_fs_get_tree()
1466 fsc->s_fs_info = fm; in virtio_fs_get_tree()
1468 if (fsc->s_fs_info) in virtio_fs_get_tree()
1473 if (!sb->s_root) { in virtio_fs_get_tree()
1480 sb->s_flags |= SB_ACTIVE; in virtio_fs_get_tree()
1483 WARN_ON(fsc->root); in virtio_fs_get_tree()
1484 fsc->root = dget(sb->s_root); in virtio_fs_get_tree()
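virtio_fs_get_tree() caps the connection's max_pages_limit at the request virtqueue's ring size minus FUSE_HEADER_OVERHEAD, because each data page costs one descriptor and the headers plus the packed argument buffers consume a few more. A one-function sketch of that arithmetic, assuming an overhead of 4 descriptors (request header, reply header, and the two packed arg buffers):

    /* Cap pages per request so a request always fits in the virtqueue ring. */
    static unsigned int max_pages_for_ring(unsigned int ring_size,
                                           unsigned int current_limit)
    {
        const unsigned int header_overhead = 4;   /* assumed, see lead-in */
        unsigned int by_ring =
            ring_size > header_overhead ? ring_size - header_overhead : 1;

        return current_limit < by_ring ? current_limit : by_ring;
    }
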
1505 if (fsc->purpose == FS_CONTEXT_FOR_SUBMOUNT) in virtio_fs_init_fs_context()
1510 return -ENOMEM; in virtio_fs_init_fs_context()
1511 fsc->fs_private = ctx; in virtio_fs_init_fs_context()
1512 fsc->ops = &virtio_fs_context_ops; in virtio_fs_init_fs_context()