Lines matching "disable", "vhost", "vsock" in drivers/vhost/vhost.c
1 // SPDX-License-Identifier: GPL-2.0-only
14 #include <linux/vhost.h>
34 #include "vhost.h"
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
55 vq->user_be = !virtio_legacy_is_little_endian(); in vhost_disable_cross_endian()
60 vq->user_be = true; in vhost_enable_cross_endian_big()
65 vq->user_be = false; in vhost_enable_cross_endian_little()
72 if (vq->private_data) in vhost_set_vring_endian()
73 return -EBUSY; in vhost_set_vring_endian()
76 return -EFAULT; in vhost_set_vring_endian()
80 return -EINVAL; in vhost_set_vring_endian()
95 .num = vq->user_be in vhost_get_vring_endian()
99 return -EFAULT; in vhost_get_vring_endian()
111 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be; in vhost_init_is_le()
120 return -ENOIOCTLCMD; in vhost_set_vring_endian()
126 return -ENOIOCTLCMD; in vhost_get_vring_endian()
131 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) in vhost_init_is_le()
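The fragments above trace both builds of the endianness helpers: with CONFIG_VHOST_CROSS_ENDIAN_LEGACY, user space may pin a legacy ring's byte order via VHOST_SET_VRING_ENDIAN (refused with -EBUSY once a backend is set); either way the result is folded into a single is_le flag. A minimal userspace sketch of that decision, with stand-in types:

    #include <stdbool.h>
    #include <stdint.h>

    #define VIRTIO_F_VERSION_1 32  /* modern virtio: rings are always little-endian */

    struct vq_endian_stub {
        uint64_t acked_features;
        bool user_be;   /* legacy cross-endian override from VHOST_SET_VRING_ENDIAN */
        bool is_le;     /* what every vhost16_to_cpu()/cpu_to_vhost16() consults */
    };

    static bool has_feature(const struct vq_endian_stub *vq, unsigned int bit)
    {
        return vq->acked_features & (1ULL << bit);
    }

    static void init_is_le(struct vq_endian_stub *vq)
    {
        /* VIRTIO 1.0 devices are little-endian by definition; legacy
         * devices follow whatever user space configured. */
        vq->is_le = has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
    }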
151 complete(&s->wait_event); in vhost_flush_work()
160 poll->wqh = wqh; in vhost_poll_func()
161 add_wait_queue(wqh, &poll->wait); in vhost_poll_func()
168 struct vhost_work *work = &poll->work; in vhost_poll_wakeup()
170 if (!(key_to_poll(key) & poll->mask)) in vhost_poll_wakeup()
173 if (!poll->dev->use_worker) in vhost_poll_wakeup()
174 work->fn(work); in vhost_poll_wakeup()
183 clear_bit(VHOST_WORK_QUEUED, &work->flags); in vhost_work_init()
184 work->fn = fn; in vhost_work_init()
193 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); in vhost_poll_init()
194 init_poll_funcptr(&poll->table, vhost_poll_func); in vhost_poll_init()
195 poll->mask = mask; in vhost_poll_init()
196 poll->dev = dev; in vhost_poll_init()
197 poll->wqh = NULL; in vhost_poll_init()
198 poll->vq = vq; in vhost_poll_init()
200 vhost_work_init(&poll->work, fn); in vhost_poll_init()
210 if (poll->wqh) in vhost_poll_start()
213 mask = vfs_poll(file, &poll->table); in vhost_poll_start()
215 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); in vhost_poll_start()
218 return -EINVAL; in vhost_poll_start()
229 if (poll->wqh) { in vhost_poll_stop()
230 remove_wait_queue(poll->wqh, &poll->wait); in vhost_poll_stop()
231 poll->wqh = NULL; in vhost_poll_stop()
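vhost_poll_start() arms the poll through vfs_poll(), which both registers the wait-queue entry and reports events that were already pending; those are replayed at once through vhost_poll_wakeup() so nothing is missed, and vhost_poll_stop() disarms exactly once. A rough userspace analogy using epoll (hypothetical helper names):

    #include <stdint.h>
    #include <sys/epoll.h>

    struct poll_stub {
        int epfd;        /* stands in for the wait-queue head (poll->wqh) */
        int armed;
        uint32_t mask;   /* the EPOLLIN/EPOLLOUT set we care about */
    };

    static int poll_start_stub(struct poll_stub *p, int fd)
    {
        struct epoll_event ev = { .events = p->mask, .data.fd = fd };

        if (p->armed)
            return 0;        /* already started: nothing to do */
        if (epoll_ctl(p->epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
            return -1;       /* vhost_poll_start() fails with -EINVAL on EPOLLERR */
        p->armed = 1;
        /* vhost additionally replays events vfs_poll() reported as
         * already pending, via vhost_poll_wakeup(). */
        return 0;
    }

    static void poll_stop_stub(struct poll_stub *p, int fd)
    {
        if (p->armed) {      /* mirrors the poll->wqh != NULL check */
            epoll_ctl(p->epfd, EPOLL_CTL_DEL, fd, NULL);
            p->armed = 0;
        }
    }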
239 if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) { in vhost_worker_queue()
244 llist_add(&work->node, &worker->work_list); in vhost_worker_queue()
245 vhost_task_wake(worker->vtsk); in vhost_worker_queue()
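The queueing idiom here: a VHOST_WORK_QUEUED bit makes enqueueing idempotent, and the kernel's llist gives a lock-free multi-producer push; only then is the worker task woken. A self-contained C11 model of the producer side:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct work_stub {
        struct work_stub *next;
        atomic_bool queued;                /* models VHOST_WORK_QUEUED */
        void (*fn)(struct work_stub *);
    };

    struct worker_stub {
        _Atomic(struct work_stub *) head;  /* models the llist work_list */
    };

    static void worker_queue(struct worker_stub *w, struct work_stub *work)
    {
        if (atomic_exchange(&work->queued, true))
            return;                        /* already queued: drop the duplicate */

        struct work_stub *old = atomic_load(&w->head);
        do {
            work->next = old;              /* lock-free push (llist_add) */
        } while (!atomic_compare_exchange_weak(&w->head, &old, work));
        /* vhost_task_wake(worker->vtsk) wakes the worker thread here */
    }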
255 worker = rcu_dereference(vq->worker); in vhost_vq_work_queue()
279 * __vhost_worker_flush - flush a worker
288 if (!worker->attachment_cnt || worker->killed) in __vhost_worker_flush()
299 mutex_unlock(&worker->mutex); in __vhost_worker_flush()
301 mutex_lock(&worker->mutex); in __vhost_worker_flush()
306 mutex_lock(&worker->mutex); in vhost_worker_flush()
308 mutex_unlock(&worker->mutex); in vhost_worker_flush()
316 xa_for_each(&dev->worker_xa, i, worker) in vhost_dev_flush()
328 worker = rcu_dereference(vq->worker); in vhost_vq_has_work()
329 if (worker && !llist_empty(&worker->work_list)) in vhost_vq_has_work()
339 vhost_vq_work_queue(poll->vq, &poll->work); in vhost_poll_queue()
348 vq->meta_iotlb[j] = NULL; in __vhost_vq_meta_reset()
355 for (i = 0; i < d->nvqs; ++i) in vhost_vq_meta_reset()
356 __vhost_vq_meta_reset(d->vqs[i]); in vhost_vq_meta_reset()
361 call_ctx->ctx = NULL; in vhost_vring_call_reset()
362 memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer)); in vhost_vring_call_reset()
367 return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq); in vhost_vq_is_setup()
374 vq->num = 1; in vhost_vq_reset()
375 vq->desc = NULL; in vhost_vq_reset()
376 vq->avail = NULL; in vhost_vq_reset()
377 vq->used = NULL; in vhost_vq_reset()
378 vq->last_avail_idx = 0; in vhost_vq_reset()
379 vq->avail_idx = 0; in vhost_vq_reset()
380 vq->last_used_idx = 0; in vhost_vq_reset()
381 vq->signalled_used = 0; in vhost_vq_reset()
382 vq->signalled_used_valid = false; in vhost_vq_reset()
383 vq->used_flags = 0; in vhost_vq_reset()
384 vq->log_used = false; in vhost_vq_reset()
385 vq->log_addr = -1ull; in vhost_vq_reset()
386 vq->private_data = NULL; in vhost_vq_reset()
387 vq->acked_features = 0; in vhost_vq_reset()
388 vq->acked_backend_features = 0; in vhost_vq_reset()
389 vq->log_base = NULL; in vhost_vq_reset()
390 vq->error_ctx = NULL; in vhost_vq_reset()
391 vq->kick = NULL; in vhost_vq_reset()
392 vq->log_ctx = NULL; in vhost_vq_reset()
395 vq->busyloop_timeout = 0; in vhost_vq_reset()
396 vq->umem = NULL; in vhost_vq_reset()
397 vq->iotlb = NULL; in vhost_vq_reset()
398 rcu_assign_pointer(vq->worker, NULL); in vhost_vq_reset()
399 vhost_vring_call_reset(&vq->call_ctx); in vhost_vq_reset()
409 node = llist_del_all(&worker->work_list); in vhost_run_work_list()
417 clear_bit(VHOST_WORK_QUEUED, &work->flags); in vhost_run_work_list()
418 kcov_remote_start_common(worker->kcov_handle); in vhost_run_work_list()
419 work->fn(work); in vhost_run_work_list()
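The consumer side drains everything with a single atomic swap (llist_del_all()), restores FIFO order, then clears each QUEUED bit before calling the handler so an item may be requeued while it runs. Continuing the producer sketch above (same stub types and headers):

    static void worker_run(struct worker_stub *w)
    {
        /* one atomic swap detaches the whole pending list (llist_del_all) */
        struct work_stub *node = atomic_exchange(&w->head, NULL);

        /* the kernel uses llist_reverse_order() for FIFO; spelled out: */
        struct work_stub *rev = NULL;
        while (node) {
            struct work_stub *next = node->next;
            node->next = rev;
            rev = node;
            node = next;
        }

        for (struct work_stub *next; rev; rev = next) {
            next = rev->next;
            atomic_store(&rev->queued, false); /* requeueing is legal from here on */
            rev->fn(rev);
        }
    }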
431 struct vhost_dev *dev = worker->dev; in vhost_worker_killed()
435 mutex_lock(&worker->mutex); in vhost_worker_killed()
436 worker->killed = true; in vhost_worker_killed()
438 for (i = 0; i < dev->nvqs; i++) { in vhost_worker_killed()
439 vq = dev->vqs[i]; in vhost_worker_killed()
441 mutex_lock(&vq->mutex); in vhost_worker_killed()
443 rcu_dereference_check(vq->worker, in vhost_worker_killed()
444 lockdep_is_held(&vq->mutex))) { in vhost_worker_killed()
445 rcu_assign_pointer(vq->worker, NULL); in vhost_worker_killed()
448 mutex_unlock(&vq->mutex); in vhost_worker_killed()
451 worker->attachment_cnt -= attach_cnt; in vhost_worker_killed()
459 mutex_unlock(&worker->mutex); in vhost_worker_killed()
464 kfree(vq->indirect); in vhost_vq_free_iovecs()
465 vq->indirect = NULL; in vhost_vq_free_iovecs()
466 kfree(vq->log); in vhost_vq_free_iovecs()
467 vq->log = NULL; in vhost_vq_free_iovecs()
468 kfree(vq->heads); in vhost_vq_free_iovecs()
469 vq->heads = NULL; in vhost_vq_free_iovecs()
478 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_alloc_iovecs()
479 vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
480 vq->indirect = kmalloc_array(UIO_MAXIOV, in vhost_dev_alloc_iovecs()
481 sizeof(*vq->indirect), in vhost_dev_alloc_iovecs()
483 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), in vhost_dev_alloc_iovecs()
485 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), in vhost_dev_alloc_iovecs()
487 if (!vq->indirect || !vq->log || !vq->heads) in vhost_dev_alloc_iovecs()
493 for (; i >= 0; --i) in vhost_dev_alloc_iovecs()
494 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_alloc_iovecs()
495 return -ENOMEM; in vhost_dev_alloc_iovecs()
502 for (i = 0; i < dev->nvqs; ++i) in vhost_dev_free_iovecs()
503 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_free_iovecs()
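The allocation above is all-or-nothing: three arrays per queue, and on the first failure the unwind loop runs with `i >= 0`, not `i > 0`, because the failing queue itself may hold partial allocations that vhost_vq_free_iovecs() must release. A sketch with libc allocators (UIO_MAXIOV is 1024; iov_limit stands in for dev->iov_limit):

    #include <stdlib.h>
    #include <sys/uio.h>

    #ifndef UIO_MAXIOV
    #define UIO_MAXIOV 1024
    #endif

    struct used_elem_stub { unsigned int id, len; };  /* models vring_used_elem */

    struct q_stub {
        struct iovec *indirect;
        struct iovec *log;
        struct used_elem_stub *heads;
    };

    static void q_free(struct q_stub *q)
    {
        free(q->indirect); q->indirect = NULL;
        free(q->log);      q->log = NULL;
        free(q->heads);    q->heads = NULL;
    }

    static int q_alloc_all(struct q_stub *qs, int nvqs, size_t iov_limit)
    {
        int i;

        for (i = 0; i < nvqs; ++i) {
            qs[i].indirect = calloc(UIO_MAXIOV, sizeof(*qs[i].indirect));
            qs[i].log      = calloc(iov_limit, sizeof(*qs[i].log));
            qs[i].heads    = calloc(iov_limit, sizeof(*qs[i].heads));
            if (!qs[i].indirect || !qs[i].log || !qs[i].heads)
                goto err;
        }
        return 0;
    err:
        for (; i >= 0; --i)    /* includes queue i: free its partial set */
            q_free(&qs[i]);
        return -1;             /* -ENOMEM in the kernel */
    }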
509 struct vhost_dev *dev = vq->dev; in vhost_exceeds_weight()
511 if ((dev->byte_weight && total_len >= dev->byte_weight) || in vhost_exceeds_weight()
512 pkts >= dev->weight) { in vhost_exceeds_weight()
513 vhost_poll_queue(&vq->poll); in vhost_exceeds_weight()
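vhost_exceeds_weight() is the fairness valve: once a handler has processed dev->weight packets or dev->byte_weight bytes in one run, it requeues itself via vhost_poll_queue() and returns true, so a single busy virtqueue cannot monopolize the worker. The predicate, isolated:

    #include <stdbool.h>
    #include <stddef.h>

    struct weight_stub { size_t byte_weight; size_t weight; };

    static bool exceeds_weight(const struct weight_stub *dev,
                               size_t total_len, size_t pkts)
    {
        /* byte_weight == 0 disables the byte limit; the packet
         * limit always applies */
        return (dev->byte_weight && total_len >= dev->byte_weight) ||
               pkts >= dev->weight;
    }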
527 return size_add(struct_size(vq->avail, ring, num), event); in vhost_get_avail_size()
536 return size_add(struct_size(vq->used, ring, num), event); in vhost_get_used_size()
542 return sizeof(*vq->desc) * num; in vhost_get_desc_size()
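These three helpers compute the split-ring layouts from the virtio spec, adding two bytes to the avail and used rings when VIRTIO_RING_F_EVENT_IDX is negotiated (that trailing __virtio16 is exactly what the vhost_used_event()/vhost_avail_event() macros near the top address). Worked out for num = 256:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct vring_desc_stub { uint64_t addr; uint32_t len; uint16_t flags, next; };

    static size_t avail_size(unsigned int num, int event_idx)
    {
        /* le16 flags + le16 idx + le16 ring[num] (+ le16 used_event) */
        return 2 + 2 + 2 * (size_t)num + (event_idx ? 2 : 0);
    }

    static size_t used_size(unsigned int num, int event_idx)
    {
        /* le16 flags + le16 idx + {le32 id, le32 len} ring[num]
         * (+ le16 avail_event) */
        return 2 + 2 + 8 * (size_t)num + (event_idx ? 2 : 0);
    }

    int main(void)
    {
        unsigned int num = 256;

        assert(num * sizeof(struct vring_desc_stub) == 4096);
        assert(avail_size(num, 1) == 518);
        assert(used_size(num, 1) == 2054);
        return 0;
    }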
555 dev->vqs = vqs; in vhost_dev_init()
556 dev->nvqs = nvqs; in vhost_dev_init()
557 mutex_init(&dev->mutex); in vhost_dev_init()
558 dev->log_ctx = NULL; in vhost_dev_init()
559 dev->umem = NULL; in vhost_dev_init()
560 dev->iotlb = NULL; in vhost_dev_init()
561 dev->mm = NULL; in vhost_dev_init()
562 dev->iov_limit = iov_limit; in vhost_dev_init()
563 dev->weight = weight; in vhost_dev_init()
564 dev->byte_weight = byte_weight; in vhost_dev_init()
565 dev->use_worker = use_worker; in vhost_dev_init()
566 dev->msg_handler = msg_handler; in vhost_dev_init()
567 init_waitqueue_head(&dev->wait); in vhost_dev_init()
568 INIT_LIST_HEAD(&dev->read_list); in vhost_dev_init()
569 INIT_LIST_HEAD(&dev->pending_list); in vhost_dev_init()
570 spin_lock_init(&dev->iotlb_lock); in vhost_dev_init()
571 xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC); in vhost_dev_init()
573 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_init()
574 vq = dev->vqs[i]; in vhost_dev_init()
575 vq->log = NULL; in vhost_dev_init()
576 vq->indirect = NULL; in vhost_dev_init()
577 vq->heads = NULL; in vhost_dev_init()
578 vq->dev = dev; in vhost_dev_init()
579 mutex_init(&vq->mutex); in vhost_dev_init()
581 if (vq->handle_kick) in vhost_dev_init()
582 vhost_poll_init(&vq->poll, vq->handle_kick, in vhost_dev_init()
592 return dev->mm == current->mm ? 0 : -EPERM; in vhost_dev_check_owner()
599 return dev->mm; in vhost_dev_has_owner()
606 if (dev->use_worker) { in vhost_attach_mm()
607 dev->mm = get_task_mm(current); in vhost_attach_mm()
615 dev->mm = current->mm; in vhost_attach_mm()
616 mmgrab(dev->mm); in vhost_attach_mm()
622 if (!dev->mm) in vhost_detach_mm()
625 if (dev->use_worker) in vhost_detach_mm()
626 mmput(dev->mm); in vhost_detach_mm()
628 mmdrop(dev->mm); in vhost_detach_mm()
630 dev->mm = NULL; in vhost_detach_mm()
639 WARN_ON(!llist_empty(&worker->work_list)); in vhost_worker_destroy()
640 xa_erase(&dev->worker_xa, worker->id); in vhost_worker_destroy()
641 vhost_task_stop(worker->vtsk); in vhost_worker_destroy()
650 if (!dev->use_worker) in vhost_workers_free()
653 for (i = 0; i < dev->nvqs; i++) in vhost_workers_free()
654 rcu_assign_pointer(dev->vqs[i]->worker, NULL); in vhost_workers_free()
659 xa_for_each(&dev->worker_xa, i, worker) in vhost_workers_free()
661 xa_destroy(&dev->worker_xa); in vhost_workers_free()
676 worker->dev = dev; in vhost_worker_create()
677 snprintf(name, sizeof(name), "vhost-%d", current->pid); in vhost_worker_create()
684 mutex_init(&worker->mutex); in vhost_worker_create()
685 init_llist_head(&worker->work_list); in vhost_worker_create()
686 worker->kcov_handle = kcov_common_handle(); in vhost_worker_create()
687 worker->vtsk = vtsk; in vhost_worker_create()
691 ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL); in vhost_worker_create()
694 worker->id = id; in vhost_worker_create()
711 mutex_lock(&worker->mutex); in __vhost_vq_attach_worker()
712 if (worker->killed) { in __vhost_vq_attach_worker()
713 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
717 mutex_lock(&vq->mutex); in __vhost_vq_attach_worker()
719 old_worker = rcu_dereference_check(vq->worker, in __vhost_vq_attach_worker()
720 lockdep_is_held(&vq->mutex)); in __vhost_vq_attach_worker()
721 rcu_assign_pointer(vq->worker, worker); in __vhost_vq_attach_worker()
722 worker->attachment_cnt++; in __vhost_vq_attach_worker()
725 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
726 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
729 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
730 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
736 mutex_lock(&old_worker->mutex); in __vhost_vq_attach_worker()
737 if (old_worker->killed) { in __vhost_vq_attach_worker()
738 mutex_unlock(&old_worker->mutex); in __vhost_vq_attach_worker()
749 mutex_lock(&vq->mutex); in __vhost_vq_attach_worker()
750 if (!vhost_vq_get_backend(vq) && !vq->kick) { in __vhost_vq_attach_worker()
751 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
753 old_worker->attachment_cnt--; in __vhost_vq_attach_worker()
754 mutex_unlock(&old_worker->mutex); in __vhost_vq_attach_worker()
756 * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID. in __vhost_vq_attach_worker()
760 WARN_ON(!old_worker->attachment_cnt && in __vhost_vq_attach_worker()
761 !llist_empty(&old_worker->work_list)); in __vhost_vq_attach_worker()
764 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
770 old_worker->attachment_cnt--; in __vhost_vq_attach_worker()
771 mutex_unlock(&old_worker->mutex); in __vhost_vq_attach_worker()
778 unsigned long index = info->worker_id; in vhost_vq_attach_worker()
779 struct vhost_dev *dev = vq->dev; in vhost_vq_attach_worker()
782 if (!dev->use_worker) in vhost_vq_attach_worker()
783 return -EINVAL; in vhost_vq_attach_worker()
785 worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT); in vhost_vq_attach_worker()
786 if (!worker || worker->id != info->worker_id) in vhost_vq_attach_worker()
787 return -ENODEV; in vhost_vq_attach_worker()
801 return -ENOMEM; in vhost_new_worker()
803 info->worker_id = worker->id; in vhost_new_worker()
811 unsigned long index = info->worker_id; in vhost_free_worker()
814 worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT); in vhost_free_worker()
815 if (!worker || worker->id != info->worker_id) in vhost_free_worker()
816 return -ENODEV; in vhost_free_worker()
818 mutex_lock(&worker->mutex); in vhost_free_worker()
819 if (worker->attachment_cnt || worker->killed) { in vhost_free_worker()
820 mutex_unlock(&worker->mutex); in vhost_free_worker()
821 return -EBUSY; in vhost_free_worker()
829 mutex_unlock(&worker->mutex); in vhost_free_worker()
846 if (idx >= dev->nvqs) in vhost_get_vq_from_user()
847 return -ENOBUFS; in vhost_get_vq_from_user()
849 idx = array_index_nospec(idx, dev->nvqs); in vhost_get_vq_from_user()
851 *vq = dev->vqs[idx]; in vhost_get_vq_from_user()
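The bounds check alone is not enough here: idx comes from user space and the CPU may speculate past the `idx >= dev->nvqs` branch, so array_index_nospec() clamps the index with branch-free arithmetic and a mispredicted access leaves no out-of-bounds cache footprint. A sketch of what I understand to be the generic fallback formula from include/linux/nospec.h (it relies on arithmetic right shift of negative values, as the kernel does):

    #include <stddef.h>

    /* all-ones when idx < size, zero otherwise, with no conditional branch */
    static unsigned long index_mask_nospec(unsigned long idx, unsigned long size)
    {
        return ~(long)(idx | (size - 1UL - idx)) >> (sizeof(long) * 8 - 1);
    }

    static unsigned long index_nospec(unsigned long idx, unsigned long size)
    {
        return idx & index_mask_nospec(idx, size);  /* clamps to 0 on overshoot */
    }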
867 if (!dev->use_worker) in vhost_worker_ioctl()
868 return -EINVAL; in vhost_worker_ioctl()
871 return -EINVAL; in vhost_worker_ioctl()
882 ret = -EFAULT; in vhost_worker_ioctl()
886 return -EFAULT; in vhost_worker_ioctl()
893 return -ENOIOCTLCMD; in vhost_worker_ioctl()
903 ret = -EFAULT; in vhost_worker_ioctl()
910 worker = rcu_dereference_check(vq->worker, in vhost_worker_ioctl()
911 lockdep_is_held(&dev->mutex)); in vhost_worker_ioctl()
913 ret = -EINVAL; in vhost_worker_ioctl()
918 ring_worker.worker_id = worker->id; in vhost_worker_ioctl()
921 ret = -EFAULT; in vhost_worker_ioctl()
924 ret = -ENOIOCTLCMD; in vhost_worker_ioctl()
940 err = -EBUSY; in vhost_dev_set_owner()
950 if (dev->use_worker) { in vhost_dev_set_owner()
952 * This should be done last, because vsock can queue work in vhost_dev_set_owner()
954 * below since we don't have to worry about vsock queueing in vhost_dev_set_owner()
959 err = -ENOMEM; in vhost_dev_set_owner()
963 for (i = 0; i < dev->nvqs; i++) in vhost_dev_set_owner()
964 __vhost_vq_attach_worker(dev->vqs[i], worker); in vhost_dev_set_owner()
997 dev->umem = umem; in vhost_dev_reset_owner()
1001 for (i = 0; i < dev->nvqs; ++i) in vhost_dev_reset_owner()
1002 dev->vqs[i]->umem = umem; in vhost_dev_reset_owner()
1010 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_stop()
1011 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) in vhost_dev_stop()
1012 vhost_poll_stop(&dev->vqs[i]->poll); in vhost_dev_stop()
1023 spin_lock(&dev->iotlb_lock); in vhost_clear_msg()
1025 list_for_each_entry_safe(node, n, &dev->read_list, node) { in vhost_clear_msg()
1026 list_del(&node->node); in vhost_clear_msg()
1030 list_for_each_entry_safe(node, n, &dev->pending_list, node) { in vhost_clear_msg()
1031 list_del(&node->node); in vhost_clear_msg()
1035 spin_unlock(&dev->iotlb_lock); in vhost_clear_msg()
1043 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_cleanup()
1044 if (dev->vqs[i]->error_ctx) in vhost_dev_cleanup()
1045 eventfd_ctx_put(dev->vqs[i]->error_ctx); in vhost_dev_cleanup()
1046 if (dev->vqs[i]->kick) in vhost_dev_cleanup()
1047 fput(dev->vqs[i]->kick); in vhost_dev_cleanup()
1048 if (dev->vqs[i]->call_ctx.ctx) in vhost_dev_cleanup()
1049 eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx); in vhost_dev_cleanup()
1050 vhost_vq_reset(dev, dev->vqs[i]); in vhost_dev_cleanup()
1053 if (dev->log_ctx) in vhost_dev_cleanup()
1054 eventfd_ctx_put(dev->log_ctx); in vhost_dev_cleanup()
1055 dev->log_ctx = NULL; in vhost_dev_cleanup()
1057 vhost_iotlb_free(dev->umem); in vhost_dev_cleanup()
1058 dev->umem = NULL; in vhost_dev_cleanup()
1059 vhost_iotlb_free(dev->iotlb); in vhost_dev_cleanup()
1060 dev->iotlb = NULL; in vhost_dev_cleanup()
1062 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); in vhost_dev_cleanup()
1073 if (a > ULONG_MAX - (unsigned long)log_base || in log_access_ok()
1078 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); in log_access_ok()
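log_access_ok()'s size expression is the dirty-bitmap length: one bit per VHOST_PAGE_SIZE page, packed eight to a byte, so covering sz bytes of guest memory needs ceil(sz / (VHOST_PAGE_SIZE * 8)) bytes of log. For example:

    #include <assert.h>
    #include <stddef.h>

    #define VHOST_PAGE_SIZE 0x1000   /* one log bit covers one 4 KiB page */

    static size_t log_bytes(size_t sz)
    {
        return (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8;
    }

    int main(void)
    {
        assert(log_bytes(1UL << 30) == 32768);  /* 1 GiB -> 32 KiB of bitmap */
        assert(log_bytes(1) == 1);              /* any nonzero size needs a byte */
        return 0;
    }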
1090 return uaddr > ULONG_MAX - size + 1; in vhost_overflow()
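vhost_overflow()'s test rearranges `uaddr + size - 1 > ULONG_MAX` so that the comparison itself cannot wrap:

    #include <assert.h>
    #include <limits.h>
    #include <stdbool.h>

    static bool range_overflows(unsigned long uaddr, unsigned long size)
    {
        if (!size)
            return true;   /* the kernel rejects empty ranges as well */
        /* last byte is uaddr + size - 1; it passes ULONG_MAX exactly when: */
        return uaddr > ULONG_MAX - size + 1;
    }

    int main(void)
    {
        assert(!range_overflows(1, ULONG_MAX));  /* last byte == ULONG_MAX */
        assert( range_overflows(2, ULONG_MAX));  /* one byte past the end */
        assert( range_overflows(ULONG_MAX, 2));  /* wraps around zero */
        return 0;
    }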
1102 list_for_each_entry(map, &umem->list, link) { in vq_memory_access_ok()
1103 unsigned long a = map->addr; in vq_memory_access_ok()
1105 if (vhost_overflow(map->addr, map->size)) in vq_memory_access_ok()
1109 if (!access_ok((void __user *)a, map->size)) in vq_memory_access_ok()
1112 map->start, in vq_memory_access_ok()
1113 map->size)) in vq_memory_access_ok()
1123 const struct vhost_iotlb_map *map = vq->meta_iotlb[type]; in vhost_vq_meta_fetch()
1128 return (void __user *)(uintptr_t)(map->addr + addr - map->start); in vhost_vq_meta_fetch()
1138 for (i = 0; i < d->nvqs; ++i) { in memory_access_ok()
1142 mutex_lock(&d->vqs[i]->mutex); in memory_access_ok()
1143 log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL); in memory_access_ok()
1145 if (d->vqs[i]->private_data) in memory_access_ok()
1146 ok = vq_memory_access_ok(d->vqs[i]->log_base, in memory_access_ok()
1150 mutex_unlock(&d->vqs[i]->mutex); in memory_access_ok()
1165 if (!vq->iotlb) in vhost_copy_to_user()
1170	 * could be accessed through iotlb. So -EAGAIN should in vhost_copy_to_user()
1181 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, in vhost_copy_to_user()
1182 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_to_user()
1186 iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size); in vhost_copy_to_user()
1200 if (!vq->iotlb) in vhost_copy_from_user()
1205	 * could be accessed through iotlb. So -EAGAIN should in vhost_copy_from_user()
1216 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, in vhost_copy_from_user()
1217 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_from_user()
1225 iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size); in vhost_copy_from_user()
1241 ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, in __vhost_get_user_slow()
1242 ARRAY_SIZE(vq->iotlb_iov), in __vhost_get_user_slow()
1251 if (ret != 1 || vq->iotlb_iov[0].iov_len != size) { in __vhost_get_user_slow()
1258 return vq->iotlb_iov[0].iov_base; in __vhost_get_user_slow()
1263	 * could be accessed through iotlb. So -EAGAIN should
1281 if (!vq->iotlb) { \
1290 ret = -EFAULT; \
1297 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), in vhost_put_avail_event()
1305 return vhost_copy_to_user(vq, vq->used->ring + idx, head, in vhost_put_used()
1312 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), in vhost_put_used_flags()
1313 &vq->used->flags); in vhost_put_used_flags()
1319 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), in vhost_put_used_idx()
1320 &vq->used->idx); in vhost_put_used_idx()
1326 if (!vq->iotlb) { \
1336 ret = -EFAULT; \
1350 for (i = 0; i < d->nvqs; ++i) in vhost_dev_lock_vqs()
1351 mutex_lock_nested(&d->vqs[i]->mutex, i); in vhost_dev_lock_vqs()
1357 for (i = 0; i < d->nvqs; ++i) in vhost_dev_unlock_vqs()
1358 mutex_unlock(&d->vqs[i]->mutex); in vhost_dev_unlock_vqs()
1364 return vhost_get_avail(vq, *idx, &vq->avail->idx); in vhost_get_avail_idx()
1371 &vq->avail->ring[idx & (vq->num - 1)]); in vhost_get_avail_head()
1377 return vhost_get_avail(vq, *flags, &vq->avail->flags); in vhost_get_avail_flags()
1389 return vhost_get_used(vq, *idx, &vq->used->idx); in vhost_get_used_idx()
1395 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); in vhost_get_desc()
1403 spin_lock(&d->iotlb_lock); in vhost_iotlb_notify_vq()
1405 list_for_each_entry_safe(node, n, &d->pending_list, node) { in vhost_iotlb_notify_vq()
1406 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; in vhost_iotlb_notify_vq()
1407 if (msg->iova <= vq_msg->iova && in vhost_iotlb_notify_vq()
1408 msg->iova + msg->size - 1 >= vq_msg->iova && in vhost_iotlb_notify_vq()
1409 vq_msg->type == VHOST_IOTLB_MISS) { in vhost_iotlb_notify_vq()
1410 vhost_poll_queue(&node->vq->poll); in vhost_iotlb_notify_vq()
1411 list_del(&node->node); in vhost_iotlb_notify_vq()
1416 spin_unlock(&d->iotlb_lock); in vhost_iotlb_notify_vq()
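The wakeup condition in vhost_iotlb_notify_vq() asks whether an incoming UPDATE covers the address a queue faulted on: a MISS pending at vq_msg->iova is satisfied when that address falls inside the updated closed interval [iova, iova + size - 1]. Isolated:

    #include <stdbool.h>
    #include <stdint.h>

    static bool update_satisfies_miss(uint64_t upd_iova, uint64_t upd_size,
                                      uint64_t miss_iova)
    {
        return upd_iova <= miss_iova &&
               miss_iova <= upd_iova + upd_size - 1;
    }
    /* update_satisfies_miss(0x1000, 0x1000, 0x1fff) -> true (last covered byte)
     * update_satisfies_miss(0x1000, 0x1000, 0x2000) -> false                  */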
1442 return -EINVAL; in vhost_process_iotlb_msg()
1444 mutex_lock(&dev->mutex); in vhost_process_iotlb_msg()
1446 switch (msg->type) { in vhost_process_iotlb_msg()
1448 if (!dev->iotlb) { in vhost_process_iotlb_msg()
1449 ret = -EFAULT; in vhost_process_iotlb_msg()
1452 if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) { in vhost_process_iotlb_msg()
1453 ret = -EFAULT; in vhost_process_iotlb_msg()
1457 if (vhost_iotlb_add_range(dev->iotlb, msg->iova, in vhost_process_iotlb_msg()
1458 msg->iova + msg->size - 1, in vhost_process_iotlb_msg()
1459 msg->uaddr, msg->perm)) { in vhost_process_iotlb_msg()
1460 ret = -ENOMEM; in vhost_process_iotlb_msg()
1466 if (!dev->iotlb) { in vhost_process_iotlb_msg()
1467 ret = -EFAULT; in vhost_process_iotlb_msg()
1471 vhost_iotlb_del_range(dev->iotlb, msg->iova, in vhost_process_iotlb_msg()
1472 msg->iova + msg->size - 1); in vhost_process_iotlb_msg()
1475 ret = -EINVAL; in vhost_process_iotlb_msg()
1480 mutex_unlock(&dev->mutex); in vhost_process_iotlb_msg()
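User space sits at the other end of this handler: after reading a VHOST_IOTLB_MISS from the device fd it write()s back an UPDATE (or INVALIDATE). A sketch of that reply path using the real vhost UAPI structs, assuming VHOST_BACKEND_F_IOTLB_MSG_V2 was negotiated (per the feature check a few lines below) and with error handling trimmed:

    #include <linux/vhost.h>
    #include <string.h>
    #include <unistd.h>

    static int iotlb_update(int vhost_fd, __u64 iova, __u64 size, __u64 uaddr)
    {
        struct vhost_msg_v2 msg;

        memset(&msg, 0, sizeof(msg));
        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb.iova  = iova;
        msg.iotlb.size  = size;
        msg.iotlb.uaddr = uaddr;     /* where the range lives in our VMA */
        msg.iotlb.perm  = VHOST_ACCESS_RW;
        msg.iotlb.type  = VHOST_IOTLB_UPDATE;

        return write(vhost_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
    }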
1494 ret = -EINVAL; in vhost_chr_write_iter()
1503 offset = offsetof(struct vhost_msg, iotlb) - sizeof(int); in vhost_chr_write_iter()
1506 if (vhost_backend_has_feature(dev->vqs[0], in vhost_chr_write_iter()
1510 ret = -EINVAL; in vhost_chr_write_iter()
1518 ret = -EINVAL; in vhost_chr_write_iter()
1525 ret = -EINVAL; in vhost_chr_write_iter()
1530 ret = -EINVAL; in vhost_chr_write_iter()
1534 if (dev->msg_handler) in vhost_chr_write_iter()
1535 ret = dev->msg_handler(dev, asid, &msg); in vhost_chr_write_iter()
1539 ret = -EFAULT; in vhost_chr_write_iter()
1555 poll_wait(file, &dev->wait, wait); in vhost_chr_poll()
1557 if (!list_empty(&dev->read_list)) in vhost_chr_poll()
1577 prepare_to_wait(&dev->wait, &wait, in vhost_chr_read_iter()
1580 node = vhost_dequeue_msg(dev, &dev->read_list); in vhost_chr_read_iter()
1584 ret = -EAGAIN; in vhost_chr_read_iter()
1588 ret = -ERESTARTSYS; in vhost_chr_read_iter()
1591 if (!dev->iotlb) { in vhost_chr_read_iter()
1592 ret = -EBADFD; in vhost_chr_read_iter()
1600 finish_wait(&dev->wait, &wait); in vhost_chr_read_iter()
1604 void *start = &node->msg; in vhost_chr_read_iter()
1606 switch (node->msg.type) { in vhost_chr_read_iter()
1608 size = sizeof(node->msg); in vhost_chr_read_iter()
1609 msg = &node->msg.iotlb; in vhost_chr_read_iter()
1612 size = sizeof(node->msg_v2); in vhost_chr_read_iter()
1613 msg = &node->msg_v2.iotlb; in vhost_chr_read_iter()
1621 if (ret != size || msg->type != VHOST_IOTLB_MISS) { in vhost_chr_read_iter()
1625 vhost_enqueue_msg(dev, &dev->pending_list, node); in vhost_chr_read_iter()
1634 struct vhost_dev *dev = vq->dev; in vhost_iotlb_miss()
1641 return -ENOMEM; in vhost_iotlb_miss()
1644 node->msg_v2.type = VHOST_IOTLB_MSG_V2; in vhost_iotlb_miss()
1645 msg = &node->msg_v2.iotlb; in vhost_iotlb_miss()
1647 msg = &node->msg.iotlb; in vhost_iotlb_miss()
1650 msg->type = VHOST_IOTLB_MISS; in vhost_iotlb_miss()
1651 msg->iova = iova; in vhost_iotlb_miss()
1652 msg->perm = access; in vhost_iotlb_miss()
1654 vhost_enqueue_msg(dev, &dev->read_list, node); in vhost_iotlb_miss()
1667 if (vq->iotlb) in vq_access_ok()
1682 if (likely(map->perm & access)) in vhost_vq_meta_update()
1683 vq->meta_iotlb[type] = map; in vhost_vq_meta_update()
1690 struct vhost_iotlb *umem = vq->iotlb; in iotlb_access_ok()
1691 u64 s = 0, size, orig_addr = addr, last = addr + len - 1; in iotlb_access_ok()
1698 if (map == NULL || map->start > addr) { in iotlb_access_ok()
1701 } else if (!(map->perm & access)) { in iotlb_access_ok()
1708 size = map->size - addr + map->start; in iotlb_access_ok()
1722 unsigned int num = vq->num; in vq_meta_prefetch()
1724 if (!vq->iotlb) in vq_meta_prefetch()
1727 return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc, in vq_meta_prefetch()
1729 iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail, in vq_meta_prefetch()
1732 iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used, in vq_meta_prefetch()
1741 return memory_access_ok(dev, dev->umem, 1); in vhost_log_access_ok()
1752 if (vq->iotlb) in vq_log_used_access_ok()
1756 vhost_get_used_size(vq, vq->num)); in vq_log_used_access_ok()
1764 return vq_memory_access_ok(log_base, vq->umem, in vq_log_access_ok()
1766 vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr); in vq_log_access_ok()
1773 if (!vq_log_access_ok(vq, vq->log_base)) in vhost_vq_access_ok()
1776 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); in vhost_vq_access_ok()
1789 return -EFAULT; in vhost_set_memory()
1791 return -EOPNOTSUPP; in vhost_set_memory()
1793 return -E2BIG; in vhost_set_memory()
1797 return -ENOMEM; in vhost_set_memory()
1800 if (copy_from_user(newmem->regions, m->regions, in vhost_set_memory()
1803 return -EFAULT; in vhost_set_memory()
1809 return -ENOMEM; in vhost_set_memory()
1812 for (region = newmem->regions; in vhost_set_memory()
1813 region < newmem->regions + mem.nregions; in vhost_set_memory()
1816 region->guest_phys_addr, in vhost_set_memory()
1817 region->guest_phys_addr + in vhost_set_memory()
1818 region->memory_size - 1, in vhost_set_memory()
1819 region->userspace_addr, in vhost_set_memory()
1827 oldumem = d->umem; in vhost_set_memory()
1828 d->umem = newumem; in vhost_set_memory()
1831 for (i = 0; i < d->nvqs; ++i) { in vhost_set_memory()
1832 mutex_lock(&d->vqs[i]->mutex); in vhost_set_memory()
1833 d->vqs[i]->umem = newumem; in vhost_set_memory()
1834 mutex_unlock(&d->vqs[i]->mutex); in vhost_set_memory()
1844 return -EFAULT; in vhost_set_memory()
1855 if (vq->private_data) in vhost_vring_set_num()
1856 return -EBUSY; in vhost_vring_set_num()
1859 return -EFAULT; in vhost_vring_set_num()
1861 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) in vhost_vring_set_num()
1862 return -EINVAL; in vhost_vring_set_num()
1863 vq->num = s.num; in vhost_vring_set_num()
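The `s.num & (s.num - 1)` test above enforces a power-of-two ring size, which is what lets `idx & (vq->num - 1)` stand in for `idx % vq->num` throughout the file:

    #include <stdbool.h>

    static bool vring_num_valid(unsigned int n)
    {
        /* n & (n - 1) clears the lowest set bit: zero only for powers of 2 */
        return n && n <= 0xffff && (n & (n - 1)) == 0;
    }
    /* vring_num_valid(256) -> true; vring_num_valid(320) -> false (320 & 319 == 256) */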
1875 return -EFAULT; in vhost_vring_set_addr()
1877 return -EOPNOTSUPP; in vhost_vring_set_addr()
1884 return -EFAULT; in vhost_vring_set_addr()
1887 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); in vhost_vring_set_addr()
1888 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); in vhost_vring_set_addr()
1889 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || in vhost_vring_set_addr()
1890 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || in vhost_vring_set_addr()
1891 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) in vhost_vring_set_addr()
1892 return -EINVAL; in vhost_vring_set_addr()
1897 if (vq->private_data) { in vhost_vring_set_addr()
1898 if (!vq_access_ok(vq, vq->num, in vhost_vring_set_addr()
1902 return -EINVAL; in vhost_vring_set_addr()
1905 if (!vq_log_used_access_ok(vq, vq->log_base, in vhost_vring_set_addr()
1908 return -EINVAL; in vhost_vring_set_addr()
1911 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); in vhost_vring_set_addr()
1912 vq->desc = (void __user *)(unsigned long)a.desc_user_addr; in vhost_vring_set_addr()
1913 vq->avail = (void __user *)(unsigned long)a.avail_user_addr; in vhost_vring_set_addr()
1914 vq->log_addr = a.log_guest_addr; in vhost_vring_set_addr()
1915 vq->used = (void __user *)(unsigned long)a.used_user_addr; in vhost_vring_set_addr()
1927 mutex_lock(&vq->mutex); in vhost_vring_set_num_addr()
1940 mutex_unlock(&vq->mutex); in vhost_vring_set_num_addr()
1964 mutex_lock(&vq->mutex); in vhost_vring_ioctl()
1970 if (vq->private_data) { in vhost_vring_ioctl()
1971 r = -EBUSY; in vhost_vring_ioctl()
1975 r = -EFAULT; in vhost_vring_ioctl()
1979 vq->last_avail_idx = s.num & 0xffff; in vhost_vring_ioctl()
1980 vq->last_used_idx = (s.num >> 16) & 0xffff; in vhost_vring_ioctl()
1983 r = -EINVAL; in vhost_vring_ioctl()
1986 vq->last_avail_idx = s.num; in vhost_vring_ioctl()
1989 vq->avail_idx = vq->last_avail_idx; in vhost_vring_ioctl()
1994 s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16); in vhost_vring_ioctl()
1996 s.num = vq->last_avail_idx; in vhost_vring_ioctl()
1998 r = -EFAULT; in vhost_vring_ioctl()
2002 r = -EFAULT; in vhost_vring_ioctl()
2010 if (eventfp != vq->kick) { in vhost_vring_ioctl()
2011 pollstop = (filep = vq->kick) != NULL; in vhost_vring_ioctl()
2012 pollstart = (vq->kick = eventfp) != NULL; in vhost_vring_ioctl()
2018 r = -EFAULT; in vhost_vring_ioctl()
2027 swap(ctx, vq->call_ctx.ctx); in vhost_vring_ioctl()
2031 r = -EFAULT; in vhost_vring_ioctl()
2039 swap(ctx, vq->error_ctx); in vhost_vring_ioctl()
2049 r = -EFAULT; in vhost_vring_ioctl()
2052 vq->busyloop_timeout = s.num; in vhost_vring_ioctl()
2056 s.num = vq->busyloop_timeout; in vhost_vring_ioctl()
2058 r = -EFAULT; in vhost_vring_ioctl()
2061 r = -ENOIOCTLCMD; in vhost_vring_ioctl()
2064 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
2065 vhost_poll_stop(&vq->poll); in vhost_vring_ioctl()
2072 if (pollstart && vq->handle_kick) in vhost_vring_ioctl()
2073 r = vhost_poll_start(&vq->poll, vq->kick); in vhost_vring_ioctl()
2075 mutex_unlock(&vq->mutex); in vhost_vring_ioctl()
2077 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
2078 vhost_dev_flush(vq->poll.dev); in vhost_vring_ioctl()
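For packed virtqueues, the GET/SET_VRING_BASE cases above ferry both ring positions in one 32-bit value: last_avail_idx in the low half, last_used_idx in the high half (with each index assumed to carry its ring-wrap counter in bit 15). The packing, under that layout:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t last_avail = 0x8003;   /* wrap counter set, index 3 */
        uint16_t last_used  = 0x0002;   /* wrap counter clear, index 2 */

        uint32_t num = (uint32_t)last_avail | ((uint32_t)last_used << 16);

        assert((uint16_t)(num & 0xffff) == last_avail);
        assert((uint16_t)(num >> 16) == last_used);
        return 0;
    }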
2090 return -ENOMEM; in vhost_init_device_iotlb()
2092 oiotlb = d->iotlb; in vhost_init_device_iotlb()
2093 d->iotlb = niotlb; in vhost_init_device_iotlb()
2095 for (i = 0; i < d->nvqs; ++i) { in vhost_init_device_iotlb()
2096 struct vhost_virtqueue *vq = d->vqs[i]; in vhost_init_device_iotlb()
2098 mutex_lock(&vq->mutex); in vhost_init_device_iotlb()
2099 vq->iotlb = niotlb; in vhost_init_device_iotlb()
2101 mutex_unlock(&vq->mutex); in vhost_init_device_iotlb()
2135 r = -EFAULT; in vhost_dev_ioctl()
2139 r = -EFAULT; in vhost_dev_ioctl()
2142 for (i = 0; i < d->nvqs; ++i) { in vhost_dev_ioctl()
2145 vq = d->vqs[i]; in vhost_dev_ioctl()
2146 mutex_lock(&vq->mutex); in vhost_dev_ioctl()
2148 if (vq->private_data && !vq_log_access_ok(vq, base)) in vhost_dev_ioctl()
2149 r = -EFAULT; in vhost_dev_ioctl()
2151 vq->log_base = base; in vhost_dev_ioctl()
2152 mutex_unlock(&vq->mutex); in vhost_dev_ioctl()
2164 swap(ctx, d->log_ctx); in vhost_dev_ioctl()
2165 for (i = 0; i < d->nvqs; ++i) { in vhost_dev_ioctl()
2166 mutex_lock(&d->vqs[i]->mutex); in vhost_dev_ioctl()
2167 d->vqs[i]->log_ctx = d->log_ctx; in vhost_dev_ioctl()
2168 mutex_unlock(&d->vqs[i]->mutex); in vhost_dev_ioctl()
2174 r = -ENOIOCTLCMD; in vhost_dev_ioctl()
2184 * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst.
2219 return -EFAULT; in log_write()
2225 write_length -= VHOST_PAGE_SIZE; in log_write()
2233 struct vhost_iotlb *umem = vq->umem; in log_write_hva()
2244 list_for_each_entry(u, &umem->list, link) { in log_write_hva()
2245 if (u->addr > hva - 1 + len || in log_write_hva()
2246 u->addr - 1 + u->size < hva) in log_write_hva()
2248 start = max(u->addr, hva); in log_write_hva()
2249 end = min(u->addr - 1 + u->size, hva - 1 + len); in log_write_hva()
2250 l = end - start + 1; in log_write_hva()
2251 r = log_write(vq->log_base, in log_write_hva()
2252 u->start + start - u->addr, in log_write_hva()
2261 return -EFAULT; in log_write_hva()
2263 len -= min; in log_write_hva()
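log_write_hva() clips the dirty range against each memory region with closed-interval arithmetic: the two `- 1 + len` / `- 1 + size` comparisons reject disjoint regions, then max-of-starts and min-of-ends give the overlap to log, region-relative. Isolated:

    #include <stdbool.h>
    #include <stdint.h>

    /* intersect the closed intervals [a0, a1] and [b0, b1] */
    static bool clip(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                     uint64_t *start, uint64_t *len)
    {
        if (a0 > b1 || b0 > a1)
            return false;                       /* disjoint: skip this region */
        *start = a0 > b0 ? a0 : b0;
        *len   = (a1 < b1 ? a1 : b1) - *start + 1; /* min(end) - start + 1 */
        return true;
    }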
2272 struct iovec *iov = vq->log_iov; in log_used()
2275 if (!vq->iotlb) in log_used()
2276 return log_write(vq->log_base, vq->log_addr + used_offset, len); in log_used()
2278 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, in log_used()
2301 if (vq->iotlb) { in vhost_log_write()
2313 r = log_write(vq->log_base, log[i].addr, l); in vhost_log_write()
2316 len -= l; in vhost_log_write()
2318 if (vq->log_ctx) in vhost_log_write()
2319 eventfd_signal(vq->log_ctx, 1); in vhost_log_write()
2333 return -EFAULT; in vhost_update_used_flags()
2334 if (unlikely(vq->log_used)) { in vhost_update_used_flags()
2338 used = &vq->used->flags; in vhost_update_used_flags()
2339 log_used(vq, (used - (void __user *)vq->used), in vhost_update_used_flags()
2340 sizeof vq->used->flags); in vhost_update_used_flags()
2341 if (vq->log_ctx) in vhost_update_used_flags()
2342 eventfd_signal(vq->log_ctx, 1); in vhost_update_used_flags()
2350 return -EFAULT; in vhost_update_avail_event()
2351 if (unlikely(vq->log_used)) { in vhost_update_avail_event()
2357 log_used(vq, (used - (void __user *)vq->used), in vhost_update_avail_event()
2359 if (vq->log_ctx) in vhost_update_avail_event()
2360 eventfd_signal(vq->log_ctx, 1); in vhost_update_avail_event()
2369 bool is_le = vq->is_le; in vhost_vq_init_access()
2371 if (!vq->private_data) in vhost_vq_init_access()
2379 vq->signalled_used_valid = false; in vhost_vq_init_access()
2380 if (!vq->iotlb && in vhost_vq_init_access()
2381 !access_ok(&vq->used->idx, sizeof vq->used->idx)) { in vhost_vq_init_access()
2382 r = -EFAULT; in vhost_vq_init_access()
2388 &vq->used->idx); in vhost_vq_init_access()
2391 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); in vhost_vq_init_access()
2395 vq->is_le = is_le; in vhost_vq_init_access()
2404 struct vhost_dev *dev = vq->dev; in translate_desc()
2405 struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem; in translate_desc()
2407 u64 s = 0, last = addr + len - 1; in translate_desc()
2413 ret = -ENOBUFS; in translate_desc()
2418 if (map == NULL || map->start > addr) { in translate_desc()
2419 if (umem != dev->iotlb) { in translate_desc()
2420 ret = -EFAULT; in translate_desc()
2423 ret = -EAGAIN; in translate_desc()
2425 } else if (!(map->perm & access)) { in translate_desc()
2426 ret = -EPERM; in translate_desc()
2431 size = map->size - addr + map->start; in translate_desc()
2432 _iov->iov_len = min((u64)len - s, size); in translate_desc()
2433 _iov->iov_base = (void __user *)(unsigned long) in translate_desc()
2434 (map->addr + addr - map->start); in translate_desc()
2440 if (ret == -EAGAIN) in translate_desc()
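translate_desc() is the scatter/gather core: it walks the address-sorted mappings, emits one iovec per contiguous chunk, and distinguishes a hard fault (-EFAULT, static memory table) from a recoverable one (-EAGAIN, IOTLB miss that vhost_iotlb_miss() reports to user space). A self-contained model, with a linear lookup standing in for the kernel's interval tree:

    #include <stdint.h>
    #include <sys/uio.h>

    struct map_stub { uint64_t start, size, addr; }; /* IOVA start, len, host addr */

    static int translate(const struct map_stub *maps, int nmaps,
                         uint64_t addr, uint64_t len,
                         struct iovec *iov, int iov_size)
    {
        uint64_t s = 0;
        int ret = 0;

        while (s < len) {
            const struct map_stub *m = NULL;
            int i;

            if (ret >= iov_size)
                return -1;                 /* -ENOBUFS: iovec array full */
            for (i = 0; i < nmaps; i++)    /* linear stand-in for the tree */
                if (maps[i].start <= addr &&
                    addr - maps[i].start < maps[i].size)
                    m = &maps[i];
            if (!m)
                return -2;                 /* -EFAULT, or -EAGAIN with an IOTLB */

            uint64_t size = m->size - (addr - m->start); /* bytes left in map */
            iov[ret].iov_len  = (size_t)(len - s < size ? len - s : size);
            iov[ret].iov_base = (void *)(uintptr_t)(m->addr + addr - m->start);
            s += iov[ret].iov_len;
            addr += iov[ret].iov_len;
            ++ret;
        }
        return ret;                        /* number of iovecs filled */
    }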
2447 * or -1U if we're at the end. */
2453 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) in next_desc()
2454 return -1U; in next_desc()
2457 next = vhost16_to_cpu(vq, READ_ONCE(desc->next)); in next_desc()
2469 u32 len = vhost32_to_cpu(vq, indirect->len); in get_indirect()
2479 return -EINVAL; in get_indirect()
2482 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, in get_indirect()
2485 if (ret != -EAGAIN) in get_indirect()
2489 iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len); in get_indirect()
2495 indirect->len); in get_indirect()
2496 return -E2BIG; in get_indirect()
2505 return -EINVAL; in get_indirect()
2509 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2510 return -EINVAL; in get_indirect()
2514 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2515 return -EINVAL; in get_indirect()
2525 iov_size - iov_count, access); in get_indirect()
2527 if (ret != -EAGAIN) in get_indirect()
2546 return -EINVAL; in get_indirect()
2550 } while ((i = next_desc(vq, &desc)) != -1); in get_indirect()
2559 * This function returns the descriptor number found, or vq->num (which is
2575 last_avail_idx = vq->last_avail_idx; in vhost_get_vq_desc()
2577 if (vq->avail_idx == vq->last_avail_idx) { in vhost_get_vq_desc()
2580 &vq->avail->idx); in vhost_get_vq_desc()
2581 return -EFAULT; in vhost_get_vq_desc()
2583 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_get_vq_desc()
2585 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { in vhost_get_vq_desc()
2587 last_avail_idx, vq->avail_idx); in vhost_get_vq_desc()
2588 return -EFAULT; in vhost_get_vq_desc()
2594 if (vq->avail_idx == last_avail_idx) in vhost_get_vq_desc()
2595 return vq->num; in vhost_get_vq_desc()
2608 &vq->avail->ring[last_avail_idx % vq->num]); in vhost_get_vq_desc()
2609 return -EFAULT; in vhost_get_vq_desc()
2615 if (unlikely(head >= vq->num)) { in vhost_get_vq_desc()
2617 head, vq->num); in vhost_get_vq_desc()
2618 return -EINVAL; in vhost_get_vq_desc()
2629 if (unlikely(i >= vq->num)) { in vhost_get_vq_desc()
2631 i, vq->num, head); in vhost_get_vq_desc()
2632 return -EINVAL; in vhost_get_vq_desc()
2634 if (unlikely(++found > vq->num)) { in vhost_get_vq_desc()
2637 i, vq->num, head); in vhost_get_vq_desc()
2638 return -EINVAL; in vhost_get_vq_desc()
2643 i, vq->desc + i); in vhost_get_vq_desc()
2644 return -EFAULT; in vhost_get_vq_desc()
2651 if (ret != -EAGAIN) in vhost_get_vq_desc()
2665 iov_size - iov_count, access); in vhost_get_vq_desc()
2667 if (ret != -EAGAIN) in vhost_get_vq_desc()
2687 return -EINVAL; in vhost_get_vq_desc()
2691 } while ((i = next_desc(vq, &desc)) != -1); in vhost_get_vq_desc()
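Both descriptor loops end with `while ((i = next_desc(vq, &desc)) != -1)` and are guarded two ways: every index is range-checked against vq->num, and a hop counter caps the walk at vq->num steps, so a guest that links descriptors into a cycle cannot wedge the handler. The shape of that guard:

    #include <stdint.h>

    #define F_NEXT 0x1   /* VRING_DESC_F_NEXT */

    struct desc_stub { uint16_t flags, next; };

    static int walk_chain(const struct desc_stub *ring, unsigned int num,
                          unsigned int head)
    {
        unsigned int i = head, found = 0;

        for (;;) {
            if (i >= num)
                return -1;           /* "Desc index is %u > %u": -EINVAL */
            if (++found > num)
                return -1;           /* "Loop detected": -EINVAL */
            /* ... translate and consume ring[i] here ... */
            if (!(ring[i].flags & F_NEXT))
                return (int)found;   /* next_desc() returned -1U */
            i = ring[i].next;
        }
    }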
2694 vq->last_avail_idx++; in vhost_get_vq_desc()
2698 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); in vhost_get_vq_desc()
2706 vq->last_avail_idx -= n; in vhost_discard_vq_desc()
2731 start = vq->last_used_idx & (vq->num - 1); in __vhost_add_used_n()
2732 used = vq->used->ring + start; in __vhost_add_used_n()
2735 return -EFAULT; in __vhost_add_used_n()
2737 if (unlikely(vq->log_used)) { in __vhost_add_used_n()
2741 log_used(vq, ((void __user *)used - (void __user *)vq->used), in __vhost_add_used_n()
2744 old = vq->last_used_idx; in __vhost_add_used_n()
2745 new = (vq->last_used_idx += count); in __vhost_add_used_n()
2750 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) in __vhost_add_used_n()
2751 vq->signalled_used_valid = false; in __vhost_add_used_n()
2762 start = vq->last_used_idx & (vq->num - 1); in vhost_add_used_n()
2763 n = vq->num - start; in vhost_add_used_n()
2769 count -= n; in vhost_add_used_n()
2777 return -EFAULT; in vhost_add_used_n()
2779 if (unlikely(vq->log_used)) { in vhost_add_used_n()
2784 sizeof vq->used->idx); in vhost_add_used_n()
2785 if (vq->log_ctx) in vhost_add_used_n()
2786 eventfd_signal(vq->log_ctx, 1); in vhost_add_used_n()
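vhost_add_used_n() splits a batch at the ring boundary: with num a power of two, start = last_used_idx & (num - 1) is the physical slot, n = num - start is the room before the wrap, and the 16-bit last_used_idx itself keeps free-running. In numbers:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int num = 256;            /* power of two: & (num-1) == % num */
        uint16_t last_used = 65534;
        unsigned int count = 5, start, first;

        start = last_used & (num - 1);     /* physical slot 254 */
        first = num - start;               /* 2 entries fit before the wrap */
        if (first > count)
            first = count;

        assert(start == 254 && first == 2);         /* write 254..255, then 0..2 */
        assert((uint16_t)(last_used + count) == 3); /* index free-runs mod 2^16 */
        return 0;
    }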
2803 unlikely(vq->avail_idx == vq->last_avail_idx)) in vhost_notify()
2814 old = vq->signalled_used; in vhost_notify()
2815 v = vq->signalled_used_valid; in vhost_notify()
2816 new = vq->signalled_used = vq->last_used_idx; in vhost_notify()
2817 vq->signalled_used_valid = true; in vhost_notify()
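vhost_notify() feeds the old and new used indices into the virtio event-index rule, while the earlier `(u16)(new - vq->signalled_used) < (u16)(new - old)` test in __vhost_add_used_n() invalidates the cached value when the window wraps. The rule itself (this matches vring_need_event() from the virtio UAPI headers) signals only when the guest's used_event threshold was crossed in (old, new], with all arithmetic mod 2^16:

    #include <assert.h>
    #include <stdint.h>

    static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
    {
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }

    int main(void)
    {
        /* guest asked to be signalled once the entry at index 10 is used */
        assert(!need_event(10, 10, 8));   /* threshold not yet crossed */
        assert( need_event(10, 11, 8));   /* crossed between old=8 and new=11 */
        assert(!need_event(10, 12, 11));  /* already signalled for this window */
        return 0;
    }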
2833 if (vq->call_ctx.ctx && vhost_notify(dev, vq)) in vhost_signal()
2834 eventfd_signal(vq->call_ctx.ctx, 1); in vhost_signal()
2848 /* multi-buffer version of vhost_add_used_and_signal */
2864 if (vq->avail_idx != vq->last_avail_idx) in vhost_vq_avail_empty()
2871 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_vq_avail_empty()
2872 if (vq->avail_idx != vq->last_avail_idx) { in vhost_vq_avail_empty()
2892 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) in vhost_enable_notify()
2894 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; in vhost_enable_notify()
2899 &vq->used->flags, r); in vhost_enable_notify()
2916 &vq->avail->idx, r); in vhost_enable_notify()
2920 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_enable_notify()
2921 if (vq->avail_idx != vq->last_avail_idx) { in vhost_enable_notify()
2940 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) in vhost_disable_notify()
2942 vq->used_flags |= VRING_USED_F_NO_NOTIFY; in vhost_disable_notify()
2946 vq_err(vq, "Failed to disable notification at %p: %d\n", in vhost_disable_notify()
2947 &vq->used->flags, r); in vhost_disable_notify()
2960 node->vq = vq; in vhost_new_msg()
2961 node->msg.type = type; in vhost_new_msg()
2969 spin_lock(&dev->iotlb_lock); in vhost_enqueue_msg()
2970 list_add_tail(&node->node, head); in vhost_enqueue_msg()
2971 spin_unlock(&dev->iotlb_lock); in vhost_enqueue_msg()
2973 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); in vhost_enqueue_msg()
2982 spin_lock(&dev->iotlb_lock); in vhost_dequeue_msg()
2986 list_del(&node->node); in vhost_dequeue_msg()
2988 spin_unlock(&dev->iotlb_lock); in vhost_dequeue_msg()
2999 mutex_lock(&dev->mutex); in vhost_set_backend_features()
3000 for (i = 0; i < dev->nvqs; ++i) { in vhost_set_backend_features()
3001 vq = dev->vqs[i]; in vhost_set_backend_features()
3002 mutex_lock(&vq->mutex); in vhost_set_backend_features()
3003 vq->acked_backend_features = features; in vhost_set_backend_features()
3004 mutex_unlock(&vq->mutex); in vhost_set_backend_features()
3006 mutex_unlock(&dev->mutex); in vhost_set_backend_features()