Lines matching refs: vq

49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])  argument
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) argument
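These two macros implement the VIRTIO_RING_F_EVENT_IDX layout: the guest's used_event field occupies the slot immediately after the avail ring's num entries, and the device's avail_event field sits just past the used ring, which is why each macro indexes ring[vq->num] of the *other* side's structure. A minimal userspace sketch of that layout (struct names are invented for the demo; the field order follows the virtio split-ring spec):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define QUEUE_NUM 4  /* demo queue size */

struct demo_avail {                 /* vring_avail + trailing used_event */
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[QUEUE_NUM];
    uint16_t used_event;            /* present only with VIRTIO_RING_F_EVENT_IDX */
};

struct demo_used_elem { uint32_t id; uint32_t len; };

struct demo_used {                  /* vring_used + trailing avail_event */
    uint16_t flags;
    uint16_t idx;
    struct demo_used_elem ring[QUEUE_NUM];
    uint16_t avail_event;           /* present only with VIRTIO_RING_F_EVENT_IDX */
};

int main(void)
{
    /* used_event is addressed as &avail->ring[num], as in the macro above. */
    printf("used_event  at %zu, ring end at %zu\n",
           offsetof(struct demo_avail, used_event),
           offsetof(struct demo_avail, ring) + QUEUE_NUM * sizeof(uint16_t));
    printf("avail_event at %zu, ring end at %zu\n",
           offsetof(struct demo_used, avail_event),
           offsetof(struct demo_used, ring) + QUEUE_NUM * sizeof(struct demo_used_elem));
    return 0;
}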
53 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) in vhost_disable_cross_endian() argument
55 vq->user_be = !virtio_legacy_is_little_endian(); in vhost_disable_cross_endian()
58 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_big() argument
60 vq->user_be = true; in vhost_enable_cross_endian_big()
63 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_little() argument
65 vq->user_be = false; in vhost_enable_cross_endian_little()
68 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
72 if (vq->private_data) in vhost_set_vring_endian()
83 vhost_enable_cross_endian_big(vq); in vhost_set_vring_endian()
85 vhost_enable_cross_endian_little(vq); in vhost_set_vring_endian()
90 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, in vhost_get_vring_endian() argument
95 .num = vq->user_be in vhost_get_vring_endian()
104 static void vhost_init_is_le(struct vhost_virtqueue *vq) in vhost_init_is_le() argument
111 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be; in vhost_init_is_le()
114 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) in vhost_disable_cross_endian() argument
118 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
123 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, in vhost_get_vring_endian() argument
129 static void vhost_init_is_le(struct vhost_virtqueue *vq) in vhost_init_is_le() argument
131 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) in vhost_init_is_le()
136 static void vhost_reset_is_le(struct vhost_virtqueue *vq) in vhost_reset_is_le() argument
138 vhost_init_is_le(vq); in vhost_reset_is_le()
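The cross-endian helpers above collapse the byte-order question into vq->is_le: a ring is treated as little-endian when the guest negotiated VIRTIO_F_VERSION_1 (modern virtio is always LE) or, for legacy rings under CONFIG_VHOST_CROSS_ENDIAN_LEGACY, when userspace did not request big-endian via VHOST_SET_VRING_ENDIAN (line 111). A hedged userspace model of that decision (names are local to the demo, not kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool init_is_le(bool has_version_1, bool user_be)
{
    /* Modern rings are always little-endian; legacy rings follow
     * whatever endianness userspace configured. */
    return has_version_1 || !user_be;
}

int main(void)
{
    printf("modern, user_be=1 -> is_le=%d\n", init_is_le(true, true));   /* 1 */
    printf("legacy, user_be=1 -> is_le=%d\n", init_is_le(false, true));  /* 0 */
    printf("legacy, user_be=0 -> is_le=%d\n", init_is_le(false, false)); /* 1 */
    return 0;
}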
191 struct vhost_virtqueue *vq) in vhost_poll_init() argument
198 poll->vq = vq; in vhost_poll_init()
249 bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work) in vhost_vq_work_queue() argument
255 worker = rcu_dereference(vq->worker); in vhost_vq_work_queue()
266 void vhost_vq_flush(struct vhost_virtqueue *vq) in vhost_vq_flush() argument
273 if (vhost_vq_work_queue(vq, &flush.work)) in vhost_vq_flush()
322 bool vhost_vq_has_work(struct vhost_virtqueue *vq) in vhost_vq_has_work() argument
328 worker = rcu_dereference(vq->worker); in vhost_vq_has_work()
339 vhost_vq_work_queue(poll->vq, &poll->work); in vhost_poll_queue()
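vhost_vq_flush() (line 266) shows the classic flush-by-queueing idiom: it enqueues a work item that does nothing but fire a completion, and waits on it only if vhost_vq_work_queue() reported that a worker actually accepted the item; since the worker runs items in order, reaching the sentinel proves all earlier work finished. A toy pthread model of the same pattern, not the kernel's vhost_work machinery (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct flush_work {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool            done;
};

/* Worker side: by the time this sentinel runs, every item queued
 * before it has already been processed. */
static void *worker(void *arg)
{
    struct flush_work *fw = arg;

    pthread_mutex_lock(&fw->lock);
    fw->done = true;
    pthread_cond_signal(&fw->cond);
    pthread_mutex_unlock(&fw->lock);
    return NULL;
}

int main(void)
{
    struct flush_work fw = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
        .done = false,
    };
    pthread_t t;

    pthread_create(&t, NULL, worker, &fw);

    /* Flusher side: block until the sentinel has executed. */
    pthread_mutex_lock(&fw.lock);
    while (!fw.done)
        pthread_cond_wait(&fw.cond, &fw.lock);
    pthread_mutex_unlock(&fw.lock);

    pthread_join(t, NULL);
    printf("flush complete\n");
    return 0;
}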
343 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq) in __vhost_vq_meta_reset() argument
348 vq->meta_iotlb[j] = NULL; in __vhost_vq_meta_reset()
365 bool vhost_vq_is_setup(struct vhost_virtqueue *vq) in vhost_vq_is_setup() argument
367 return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq); in vhost_vq_is_setup()
372 struct vhost_virtqueue *vq) in vhost_vq_reset() argument
374 vq->num = 1; in vhost_vq_reset()
375 vq->desc = NULL; in vhost_vq_reset()
376 vq->avail = NULL; in vhost_vq_reset()
377 vq->used = NULL; in vhost_vq_reset()
378 vq->last_avail_idx = 0; in vhost_vq_reset()
379 vq->avail_idx = 0; in vhost_vq_reset()
380 vq->last_used_idx = 0; in vhost_vq_reset()
381 vq->signalled_used = 0; in vhost_vq_reset()
382 vq->signalled_used_valid = false; in vhost_vq_reset()
383 vq->used_flags = 0; in vhost_vq_reset()
384 vq->log_used = false; in vhost_vq_reset()
385 vq->log_addr = -1ull; in vhost_vq_reset()
386 vq->private_data = NULL; in vhost_vq_reset()
387 vq->acked_features = 0; in vhost_vq_reset()
388 vq->acked_backend_features = 0; in vhost_vq_reset()
389 vq->log_base = NULL; in vhost_vq_reset()
390 vq->error_ctx = NULL; in vhost_vq_reset()
391 vq->kick = NULL; in vhost_vq_reset()
392 vq->log_ctx = NULL; in vhost_vq_reset()
393 vhost_disable_cross_endian(vq); in vhost_vq_reset()
394 vhost_reset_is_le(vq); in vhost_vq_reset()
395 vq->busyloop_timeout = 0; in vhost_vq_reset()
396 vq->umem = NULL; in vhost_vq_reset()
397 vq->iotlb = NULL; in vhost_vq_reset()
398 rcu_assign_pointer(vq->worker, NULL); in vhost_vq_reset()
399 vhost_vring_call_reset(&vq->call_ctx); in vhost_vq_reset()
400 __vhost_vq_meta_reset(vq); in vhost_vq_reset()
432 struct vhost_virtqueue *vq; in vhost_worker_killed() local
439 vq = dev->vqs[i]; in vhost_worker_killed()
441 mutex_lock(&vq->mutex); in vhost_worker_killed()
443 rcu_dereference_check(vq->worker, in vhost_worker_killed()
444 lockdep_is_held(&vq->mutex))) { in vhost_worker_killed()
445 rcu_assign_pointer(vq->worker, NULL); in vhost_worker_killed()
448 mutex_unlock(&vq->mutex); in vhost_worker_killed()
462 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) in vhost_vq_free_iovecs() argument
464 kfree(vq->indirect); in vhost_vq_free_iovecs()
465 vq->indirect = NULL; in vhost_vq_free_iovecs()
466 kfree(vq->log); in vhost_vq_free_iovecs()
467 vq->log = NULL; in vhost_vq_free_iovecs()
468 kfree(vq->heads); in vhost_vq_free_iovecs()
469 vq->heads = NULL; in vhost_vq_free_iovecs()
475 struct vhost_virtqueue *vq; in vhost_dev_alloc_iovecs() local
479 vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
480 vq->indirect = kmalloc_array(UIO_MAXIOV, in vhost_dev_alloc_iovecs()
481 sizeof(*vq->indirect), in vhost_dev_alloc_iovecs()
483 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), in vhost_dev_alloc_iovecs()
485 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), in vhost_dev_alloc_iovecs()
487 if (!vq->indirect || !vq->log || !vq->heads) in vhost_dev_alloc_iovecs()
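vhost_dev_alloc_iovecs() allocates three per-queue arrays and, if any kmalloc_array() fails, rolls back every queue through vhost_vq_free_iovecs(); the free helper NULLs each pointer so a later free is harmless. A plain-C sketch of the same all-or-nothing pattern (element sizes are stand-ins, though UIO_MAXIOV really is 1024 in the kernel):

#include <stdio.h>
#include <stdlib.h>

struct demo_vq {
    void *indirect;
    void *log;
    void *heads;
};

static void vq_free_iovecs(struct demo_vq *vq)
{
    free(vq->indirect); vq->indirect = NULL;
    free(vq->log);      vq->log = NULL;
    free(vq->heads);    vq->heads = NULL;
}

static int vq_alloc_iovecs(struct demo_vq *vq, size_t iov_limit)
{
    vq->indirect = calloc(1024, 16);   /* stand-in for UIO_MAXIOV iovecs */
    vq->log      = calloc(iov_limit, 16);
    vq->heads    = calloc(iov_limit, 8);
    if (!vq->indirect || !vq->log || !vq->heads) {
        vq_free_iovecs(vq);            /* free(NULL) is a no-op, so this is safe */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct demo_vq vq = {0};

    printf("alloc: %s\n", vq_alloc_iovecs(&vq, 64) ? "failed" : "ok");
    vq_free_iovecs(&vq);
    return 0;
}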
506 bool vhost_exceeds_weight(struct vhost_virtqueue *vq, in vhost_exceeds_weight() argument
509 struct vhost_dev *dev = vq->dev; in vhost_exceeds_weight()
513 vhost_poll_queue(&vq->poll); in vhost_exceeds_weight()
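vhost_exceeds_weight() is the fairness valve for request handlers: once one run of a handler has consumed its packet or byte budget, it requeues the poll work via vhost_poll_queue() and returns true so the handler bails out, letting other virtqueues and tasks run. A toy model of the check (thresholds invented for the demo):

#include <stdbool.h>
#include <stdio.h>

static bool exceeds_weight(int pkts, long long bytes,
                           int weight, long long byte_weight)
{
    return pkts >= weight || bytes >= byte_weight;
}

int main(void)
{
    long long total_bytes = 0;
    int pkt;

    for (pkt = 0; ; pkt++) {
        total_bytes += 1500;           /* pretend: one MTU-sized packet */
        if (exceeds_weight(pkt, total_bytes, 256, 256 * 1500LL)) {
            /* The kernel would requeue itself here and stop the run. */
            printf("yield after %d packets, %lld bytes\n", pkt, total_bytes);
            break;
        }
    }
    return 0;
}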
521 static size_t vhost_get_avail_size(struct vhost_virtqueue *vq, in vhost_get_avail_size() argument
525 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; in vhost_get_avail_size()
527 return size_add(struct_size(vq->avail, ring, num), event); in vhost_get_avail_size()
530 static size_t vhost_get_used_size(struct vhost_virtqueue *vq, in vhost_get_used_size() argument
534 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; in vhost_get_used_size()
536 return size_add(struct_size(vq->used, ring, num), event); in vhost_get_used_size()
539 static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, in vhost_get_desc_size() argument
542 return sizeof(*vq->desc) * num; in vhost_get_desc_size()
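The three size helpers reproduce the split-ring layout math: struct_size(vq->avail, ring, num) is the header plus the flexible ring array, and a 2-byte event field is appended only when VIRTIO_RING_F_EVENT_IDX was negotiated. The spec's fixed field sizes make this easy to mirror in a runnable sketch:

#include <stdio.h>
#include <stddef.h>

static size_t avail_size(unsigned num, int event_idx)
{
    /* flags(2) + idx(2) + num ring entries(2 each) [+ used_event(2)] */
    return 2 + 2 + 2u * num + (event_idx ? 2 : 0);
}

static size_t used_size(unsigned num, int event_idx)
{
    /* flags(2) + idx(2) + num {id,len} pairs(8 each) [+ avail_event(2)] */
    return 2 + 2 + 8u * num + (event_idx ? 2 : 0);
}

static size_t desc_size(unsigned num)
{
    return 16u * num;                  /* struct vring_desc is 16 bytes */
}

int main(void)
{
    unsigned num = 256;

    printf("desc:  %zu bytes\n", desc_size(num));        /* 4096 */
    printf("avail: %zu bytes\n", avail_size(num, 1));    /* 518  */
    printf("used:  %zu bytes\n", used_size(num, 1));     /* 2054 */
    return 0;
}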
552 struct vhost_virtqueue *vq; in vhost_dev_init() local
574 vq = dev->vqs[i]; in vhost_dev_init()
575 vq->log = NULL; in vhost_dev_init()
576 vq->indirect = NULL; in vhost_dev_init()
577 vq->heads = NULL; in vhost_dev_init()
578 vq->dev = dev; in vhost_dev_init()
579 mutex_init(&vq->mutex); in vhost_dev_init()
580 vhost_vq_reset(dev, vq); in vhost_dev_init()
581 if (vq->handle_kick) in vhost_dev_init()
582 vhost_poll_init(&vq->poll, vq->handle_kick, in vhost_dev_init()
583 EPOLLIN, dev, vq); in vhost_dev_init()
706 static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq, in __vhost_vq_attach_worker() argument
717 mutex_lock(&vq->mutex); in __vhost_vq_attach_worker()
719 old_worker = rcu_dereference_check(vq->worker, in __vhost_vq_attach_worker()
720 lockdep_is_held(&vq->mutex)); in __vhost_vq_attach_worker()
721 rcu_assign_pointer(vq->worker, worker); in __vhost_vq_attach_worker()
725 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
729 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
749 mutex_lock(&vq->mutex); in __vhost_vq_attach_worker()
750 if (!vhost_vq_get_backend(vq) && !vq->kick) { in __vhost_vq_attach_worker()
751 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
764 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
775 static int vhost_vq_attach_worker(struct vhost_virtqueue *vq, in vhost_vq_attach_worker() argument
779 struct vhost_dev *dev = vq->dev; in vhost_vq_attach_worker()
789 __vhost_vq_attach_worker(vq, worker); in vhost_vq_attach_worker()
836 struct vhost_virtqueue **vq, u32 *id) in vhost_get_vq_from_user() argument
851 *vq = dev->vqs[idx]; in vhost_get_vq_from_user()
863 struct vhost_virtqueue *vq; in vhost_worker_ioctl() local
896 ret = vhost_get_vq_from_user(dev, argp, &vq, &idx); in vhost_worker_ioctl()
907 ret = vhost_vq_attach_worker(vq, &ring_worker); in vhost_worker_ioctl()
910 worker = rcu_dereference_check(vq->worker, in vhost_worker_ioctl()
1119 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, in vhost_vq_meta_fetch() argument
1123 const struct vhost_iotlb_map *map = vq->meta_iotlb[type]; in vhost_vq_meta_fetch()
1157 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
1160 static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, in vhost_copy_to_user() argument
1165 if (!vq->iotlb) in vhost_copy_to_user()
1174 void __user *uaddr = vhost_vq_meta_fetch(vq, in vhost_copy_to_user()
1181 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, in vhost_copy_to_user()
1182 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_to_user()
1186 iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size); in vhost_copy_to_user()
1195 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, in vhost_copy_from_user() argument
1200 if (!vq->iotlb) in vhost_copy_from_user()
1208 void __user *uaddr = vhost_vq_meta_fetch(vq, in vhost_copy_from_user()
1216 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, in vhost_copy_from_user()
1217 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_from_user()
1220 vq_err(vq, "IOTLB translation failure: uaddr " in vhost_copy_from_user()
1225 iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size); in vhost_copy_from_user()
1235 static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq, in __vhost_get_user_slow() argument
1241 ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, in __vhost_get_user_slow()
1242 ARRAY_SIZE(vq->iotlb_iov), in __vhost_get_user_slow()
1245 vq_err(vq, "IOTLB translation failure: uaddr " in __vhost_get_user_slow()
1251 if (ret != 1 || vq->iotlb_iov[0].iov_len != size) { in __vhost_get_user_slow()
1252 vq_err(vq, "Non atomic userspace memory access: uaddr " in __vhost_get_user_slow()
1258 return vq->iotlb_iov[0].iov_base; in __vhost_get_user_slow()
1266 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, in __vhost_get_user() argument
1270 void __user *uaddr = vhost_vq_meta_fetch(vq, in __vhost_get_user()
1275 return __vhost_get_user_slow(vq, addr, size, type); in __vhost_get_user()
1278 #define vhost_put_user(vq, x, ptr) \ argument
1281 if (!vq->iotlb) { \
1285 (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
1295 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq) in vhost_put_avail_event() argument
1297 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), in vhost_put_avail_event()
1298 vhost_avail_event(vq)); in vhost_put_avail_event()
1301 static inline int vhost_put_used(struct vhost_virtqueue *vq, in vhost_put_used() argument
1305 return vhost_copy_to_user(vq, vq->used->ring + idx, head, in vhost_put_used()
1309 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) in vhost_put_used_flags() argument
1312 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), in vhost_put_used_flags()
1313 &vq->used->flags); in vhost_put_used_flags()
1316 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq) in vhost_put_used_idx() argument
1319 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), in vhost_put_used_idx()
1320 &vq->used->idx); in vhost_put_used_idx()
1323 #define vhost_get_user(vq, x, ptr, type) \ argument
1326 if (!vq->iotlb) { \
1330 (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
1341 #define vhost_get_avail(vq, x, ptr) \ argument
1342 vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
1344 #define vhost_get_used(vq, x, ptr) \ argument
1345 vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
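vhost_get_user()/vhost_put_user() pick one of three paths: with no IOTLB they use plain __get_user/__put_user on the user pointer; with an IOTLB they first try the per-queue meta_iotlb cache via vhost_vq_meta_fetch() (one cached translation each for the DESC, AVAIL, and USED regions), and only on a miss pay for the full translate_desc() walk in __vhost_get_user_slow(). A sketch of that cached-translation shape (the cache policy and identity mapping here are invented for the demo):

#include <stdio.h>
#include <stdint.h>

enum { ADDR_DESC, ADDR_AVAIL, ADDR_USED, ADDR_NUM };

static void *meta_cache[ADDR_NUM];     /* one entry per metadata region */
static int slow_hits;

static void *translate_slow(uint64_t addr)
{
    slow_hits++;                       /* kernel: translate_desc() walk */
    return (void *)(uintptr_t)addr;    /* identity map for the demo */
}

static void *get_user_ptr(uint64_t addr, int type)
{
    if (meta_cache[type])              /* fast path: cached translation */
        return meta_cache[type];
    meta_cache[type] = translate_slow(addr);
    return meta_cache[type];
}

int main(void)
{
    get_user_ptr(0x1000, ADDR_AVAIL);
    get_user_ptr(0x1000, ADDR_AVAIL);  /* served from the cache */
    printf("slow translations: %d (expected 1)\n", slow_hits);
    return 0;
}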
1361 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, in vhost_get_avail_idx() argument
1364 return vhost_get_avail(vq, *idx, &vq->avail->idx); in vhost_get_avail_idx()
1367 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, in vhost_get_avail_head() argument
1370 return vhost_get_avail(vq, *head, in vhost_get_avail_head()
1371 &vq->avail->ring[idx & (vq->num - 1)]); in vhost_get_avail_head()
1374 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq, in vhost_get_avail_flags() argument
1377 return vhost_get_avail(vq, *flags, &vq->avail->flags); in vhost_get_avail_flags()
1380 static inline int vhost_get_used_event(struct vhost_virtqueue *vq, in vhost_get_used_event() argument
1383 return vhost_get_avail(vq, *event, vhost_used_event(vq)); in vhost_get_used_event()
1386 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq, in vhost_get_used_idx() argument
1389 return vhost_get_used(vq, *idx, &vq->used->idx); in vhost_get_used_idx()
1392 static inline int vhost_get_desc(struct vhost_virtqueue *vq, in vhost_get_desc() argument
1395 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); in vhost_get_desc()
1410 vhost_poll_queue(&node->vq->poll); in vhost_iotlb_notify_vq()
1632 static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access) in vhost_iotlb_miss() argument
1634 struct vhost_dev *dev = vq->dev; in vhost_iotlb_miss()
1637 bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2); in vhost_iotlb_miss()
1639 node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG); in vhost_iotlb_miss()
1659 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, in vq_access_ok() argument
1667 if (vq->iotlb) in vq_access_ok()
1670 return access_ok(desc, vhost_get_desc_size(vq, num)) && in vq_access_ok()
1671 access_ok(avail, vhost_get_avail_size(vq, num)) && in vq_access_ok()
1672 access_ok(used, vhost_get_used_size(vq, num)); in vq_access_ok()
1675 static void vhost_vq_meta_update(struct vhost_virtqueue *vq, in vhost_vq_meta_update() argument
1683 vq->meta_iotlb[type] = map; in vhost_vq_meta_update()
1686 static bool iotlb_access_ok(struct vhost_virtqueue *vq, in iotlb_access_ok() argument
1690 struct vhost_iotlb *umem = vq->iotlb; in iotlb_access_ok()
1693 if (vhost_vq_meta_fetch(vq, addr, len, type)) in iotlb_access_ok()
1699 vhost_iotlb_miss(vq, addr, access); in iotlb_access_ok()
1711 vhost_vq_meta_update(vq, map, type); in iotlb_access_ok()
1720 int vq_meta_prefetch(struct vhost_virtqueue *vq) in vq_meta_prefetch() argument
1722 unsigned int num = vq->num; in vq_meta_prefetch()
1724 if (!vq->iotlb) in vq_meta_prefetch()
1727 return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc, in vq_meta_prefetch()
1728 vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && in vq_meta_prefetch()
1729 iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail, in vq_meta_prefetch()
1730 vhost_get_avail_size(vq, num), in vq_meta_prefetch()
1732 iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used, in vq_meta_prefetch()
1733 vhost_get_used_size(vq, num), VHOST_ADDR_USED); in vq_meta_prefetch()
1745 static bool vq_log_used_access_ok(struct vhost_virtqueue *vq, in vq_log_used_access_ok() argument
1752 if (vq->iotlb) in vq_log_used_access_ok()
1756 vhost_get_used_size(vq, vq->num)); in vq_log_used_access_ok()
1761 static bool vq_log_access_ok(struct vhost_virtqueue *vq, in vq_log_access_ok() argument
1764 return vq_memory_access_ok(log_base, vq->umem, in vq_log_access_ok()
1765 vhost_has_feature(vq, VHOST_F_LOG_ALL)) && in vq_log_access_ok()
1766 vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr); in vq_log_access_ok()
1771 bool vhost_vq_access_ok(struct vhost_virtqueue *vq) in vhost_vq_access_ok() argument
1773 if (!vq_log_access_ok(vq, vq->log_base)) in vhost_vq_access_ok()
1776 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); in vhost_vq_access_ok()
1848 struct vhost_virtqueue *vq, in vhost_vring_set_num() argument
1855 if (vq->private_data) in vhost_vring_set_num()
1863 vq->num = s.num; in vhost_vring_set_num()
1869 struct vhost_virtqueue *vq, in vhost_vring_set_addr() argument
1887 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); in vhost_vring_set_addr()
1888 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); in vhost_vring_set_addr()
1897 if (vq->private_data) { in vhost_vring_set_addr()
1898 if (!vq_access_ok(vq, vq->num, in vhost_vring_set_addr()
1905 if (!vq_log_used_access_ok(vq, vq->log_base, in vhost_vring_set_addr()
1911 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); in vhost_vring_set_addr()
1912 vq->desc = (void __user *)(unsigned long)a.desc_user_addr; in vhost_vring_set_addr()
1913 vq->avail = (void __user *)(unsigned long)a.avail_user_addr; in vhost_vring_set_addr()
1914 vq->log_addr = a.log_guest_addr; in vhost_vring_set_addr()
1915 vq->used = (void __user *)(unsigned long)a.used_user_addr; in vhost_vring_set_addr()
1921 struct vhost_virtqueue *vq, in vhost_vring_set_num_addr() argument
1927 mutex_lock(&vq->mutex); in vhost_vring_set_num_addr()
1931 r = vhost_vring_set_num(d, vq, argp); in vhost_vring_set_num_addr()
1934 r = vhost_vring_set_addr(d, vq, argp); in vhost_vring_set_num_addr()
1940 mutex_unlock(&vq->mutex); in vhost_vring_set_num_addr()
1949 struct vhost_virtqueue *vq; in vhost_vring_ioctl() local
1955 r = vhost_get_vq_from_user(d, argp, &vq, &idx); in vhost_vring_ioctl()
1961 return vhost_vring_set_num_addr(d, vq, ioctl, argp); in vhost_vring_ioctl()
1964 mutex_lock(&vq->mutex); in vhost_vring_ioctl()
1970 if (vq->private_data) { in vhost_vring_ioctl()
1978 if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { in vhost_vring_ioctl()
1979 vq->last_avail_idx = s.num & 0xffff; in vhost_vring_ioctl()
1980 vq->last_used_idx = (s.num >> 16) & 0xffff; in vhost_vring_ioctl()
1986 vq->last_avail_idx = s.num; in vhost_vring_ioctl()
1989 vq->avail_idx = vq->last_avail_idx; in vhost_vring_ioctl()
1993 if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) in vhost_vring_ioctl()
1994 s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16); in vhost_vring_ioctl()
1996 s.num = vq->last_avail_idx; in vhost_vring_ioctl()
2010 if (eventfp != vq->kick) { in vhost_vring_ioctl()
2011 pollstop = (filep = vq->kick) != NULL; in vhost_vring_ioctl()
2012 pollstart = (vq->kick = eventfp) != NULL; in vhost_vring_ioctl()
2027 swap(ctx, vq->call_ctx.ctx); in vhost_vring_ioctl()
2039 swap(ctx, vq->error_ctx); in vhost_vring_ioctl()
2042 r = vhost_set_vring_endian(vq, argp); in vhost_vring_ioctl()
2045 r = vhost_get_vring_endian(vq, idx, argp); in vhost_vring_ioctl()
2052 vq->busyloop_timeout = s.num; in vhost_vring_ioctl()
2056 s.num = vq->busyloop_timeout; in vhost_vring_ioctl()
2064 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
2065 vhost_poll_stop(&vq->poll); in vhost_vring_ioctl()
2072 if (pollstart && vq->handle_kick) in vhost_vring_ioctl()
2073 r = vhost_poll_start(&vq->poll, vq->kick); in vhost_vring_ioctl()
2075 mutex_unlock(&vq->mutex); in vhost_vring_ioctl()
2077 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
2078 vhost_dev_flush(vq->poll.dev); in vhost_vring_ioctl()
2096 struct vhost_virtqueue *vq = d->vqs[i]; in vhost_init_device_iotlb() local
2098 mutex_lock(&vq->mutex); in vhost_init_device_iotlb()
2099 vq->iotlb = niotlb; in vhost_init_device_iotlb()
2100 __vhost_vq_meta_reset(vq); in vhost_init_device_iotlb()
2101 mutex_unlock(&vq->mutex); in vhost_init_device_iotlb()
2143 struct vhost_virtqueue *vq; in vhost_dev_ioctl() local
2145 vq = d->vqs[i]; in vhost_dev_ioctl()
2146 mutex_lock(&vq->mutex); in vhost_dev_ioctl()
2148 if (vq->private_data && !vq_log_access_ok(vq, base)) in vhost_dev_ioctl()
2151 vq->log_base = base; in vhost_dev_ioctl()
2152 mutex_unlock(&vq->mutex); in vhost_dev_ioctl()
2231 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) in log_write_hva() argument
2233 struct vhost_iotlb *umem = vq->umem; in log_write_hva()
2251 r = log_write(vq->log_base, in log_write_hva()
2270 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) in log_used() argument
2272 struct iovec *iov = vq->log_iov; in log_used()
2275 if (!vq->iotlb) in log_used()
2276 return log_write(vq->log_base, vq->log_addr + used_offset, len); in log_used()
2278 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, in log_used()
2284 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, in log_used()
2293 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, in vhost_log_write() argument
2301 if (vq->iotlb) { in vhost_log_write()
2303 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, in vhost_log_write()
2313 r = log_write(vq->log_base, log[i].addr, l); in vhost_log_write()
2318 if (vq->log_ctx) in vhost_log_write()
2319 eventfd_signal(vq->log_ctx, 1); in vhost_log_write()
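vhost_log_write() supports live migration's dirty tracking: every guest page touched through the ring gets one bit set in a userspace-supplied bitmap at log_base (with an IOTLB, log_write_hva() first maps the host virtual address back to a guest address). The bit math is simple enough to demo standalone (4 KiB pages assumed; the kernel writes the bitmap through user-memory accessors, not a local array):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define LOG_BYTES  64                  /* covers 512 pages for the demo */

static uint8_t log_bitmap[LOG_BYTES];

static void log_write(uint64_t addr, uint64_t len)
{
    uint64_t first = addr >> PAGE_SHIFT;
    uint64_t last  = (addr + len - 1) >> PAGE_SHIFT;

    for (uint64_t pg = first; pg <= last; pg++)
        log_bitmap[pg / 8] |= 1u << (pg % 8);
}

int main(void)
{
    log_write(0x1800, 5000);           /* spans guest pages 1 and 2 */
    for (unsigned pg = 0; pg < 4; pg++)
        printf("page %u dirty: %d\n", pg,
               !!(log_bitmap[pg / 8] & (1u << (pg % 8))));
    return 0;
}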
2329 static int vhost_update_used_flags(struct vhost_virtqueue *vq) in vhost_update_used_flags() argument
2332 if (vhost_put_used_flags(vq)) in vhost_update_used_flags()
2334 if (unlikely(vq->log_used)) { in vhost_update_used_flags()
2338 used = &vq->used->flags; in vhost_update_used_flags()
2339 log_used(vq, (used - (void __user *)vq->used), in vhost_update_used_flags()
2340 sizeof vq->used->flags); in vhost_update_used_flags()
2341 if (vq->log_ctx) in vhost_update_used_flags()
2342 eventfd_signal(vq->log_ctx, 1); in vhost_update_used_flags()
2347 static int vhost_update_avail_event(struct vhost_virtqueue *vq) in vhost_update_avail_event() argument
2349 if (vhost_put_avail_event(vq)) in vhost_update_avail_event()
2351 if (unlikely(vq->log_used)) { in vhost_update_avail_event()
2356 used = vhost_avail_event(vq); in vhost_update_avail_event()
2357 log_used(vq, (used - (void __user *)vq->used), in vhost_update_avail_event()
2358 sizeof *vhost_avail_event(vq)); in vhost_update_avail_event()
2359 if (vq->log_ctx) in vhost_update_avail_event()
2360 eventfd_signal(vq->log_ctx, 1); in vhost_update_avail_event()
2365 int vhost_vq_init_access(struct vhost_virtqueue *vq) in vhost_vq_init_access() argument
2369 bool is_le = vq->is_le; in vhost_vq_init_access()
2371 if (!vq->private_data) in vhost_vq_init_access()
2374 vhost_init_is_le(vq); in vhost_vq_init_access()
2376 r = vhost_update_used_flags(vq); in vhost_vq_init_access()
2379 vq->signalled_used_valid = false; in vhost_vq_init_access()
2380 if (!vq->iotlb && in vhost_vq_init_access()
2381 !access_ok(&vq->used->idx, sizeof vq->used->idx)) { in vhost_vq_init_access()
2385 r = vhost_get_used_idx(vq, &last_used_idx); in vhost_vq_init_access()
2387 vq_err(vq, "Can't access used idx at %p\n", in vhost_vq_init_access()
2388 &vq->used->idx); in vhost_vq_init_access()
2391 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); in vhost_vq_init_access()
2395 vq->is_le = is_le; in vhost_vq_init_access()
2400 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, in translate_desc() argument
2404 struct vhost_dev *dev = vq->dev; in translate_desc()
2441 vhost_iotlb_miss(vq, addr, access); in translate_desc()
2448 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) in next_desc() argument
2453 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) in next_desc()
2457 next = vhost16_to_cpu(vq, READ_ONCE(desc->next)); in next_desc()
2461 static int get_indirect(struct vhost_virtqueue *vq, in get_indirect() argument
2469 u32 len = vhost32_to_cpu(vq, indirect->len); in get_indirect()
2475 vq_err(vq, "Invalid length in indirect descriptor: " in get_indirect()
2482 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, in get_indirect()
2486 vq_err(vq, "Translation failure %d in indirect.\n", ret); in get_indirect()
2489 iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len); in get_indirect()
2494 vq_err(vq, "Indirect buffer length too big: %d\n", in get_indirect()
2502 vq_err(vq, "Loop detected: last one at %u " in get_indirect()
2508 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", in get_indirect()
2509 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2512 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) { in get_indirect()
2513 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", in get_indirect()
2514 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2518 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) in get_indirect()
2523 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), in get_indirect()
2524 vhost32_to_cpu(vq, desc.len), iov + iov_count, in get_indirect()
2528 vq_err(vq, "Translation failure %d indirect idx %d\n", in get_indirect()
2536 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); in get_indirect()
2537 log[*log_num].len = vhost32_to_cpu(vq, desc.len); in get_indirect()
2544 vq_err(vq, "Indirect descriptor " in get_indirect()
2550 } while ((i = next_desc(vq, &desc)) != -1); in get_indirect()
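get_indirect() walks a descriptor table through next links, and because the table lives in guest memory a malicious guest could make the links form a cycle; the ++found > count guard (mirrored at line 2634 in vhost_get_vq_desc) bounds the walk to the table size. A runnable demo of the guard against a looping chain (descriptor layout per the spec, contents invented):

#include <stdio.h>
#include <stdint.h>

struct demo_desc {
    uint64_t addr;
    uint32_t len;
    uint16_t flags;                    /* bit 0: NEXT */
    uint16_t next;
};
#define F_NEXT 0x1

int main(void)
{
    /* A 3-entry table whose last entry loops back to the first. */
    struct demo_desc table[3] = {
        { 0x1000, 64, F_NEXT, 1 },
        { 0x2000, 64, F_NEXT, 2 },
        { 0x3000, 64, F_NEXT, 0 },     /* malicious cycle */
    };
    unsigned num = 3, found = 0, i = 0;

    do {
        if (++found > num) {           /* kernel: "Loop detected" vq_err */
            printf("loop detected at desc %u\n", i);
            return 1;
        }
        printf("desc %u: addr=0x%llx len=%u\n", i,
               (unsigned long long)table[i].addr, (unsigned)table[i].len);
        if (!(table[i].flags & F_NEXT))
            break;
        i = table[i].next;
    } while (1);
    return 0;
}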
2562 int vhost_get_vq_desc(struct vhost_virtqueue *vq, in vhost_get_vq_desc() argument
2575 last_avail_idx = vq->last_avail_idx; in vhost_get_vq_desc()
2577 if (vq->avail_idx == vq->last_avail_idx) { in vhost_get_vq_desc()
2578 if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) { in vhost_get_vq_desc()
2579 vq_err(vq, "Failed to access avail idx at %p\n", in vhost_get_vq_desc()
2580 &vq->avail->idx); in vhost_get_vq_desc()
2583 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_get_vq_desc()
2585 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { in vhost_get_vq_desc()
2586 vq_err(vq, "Guest moved used index from %u to %u", in vhost_get_vq_desc()
2587 last_avail_idx, vq->avail_idx); in vhost_get_vq_desc()
2594 if (vq->avail_idx == last_avail_idx) in vhost_get_vq_desc()
2595 return vq->num; in vhost_get_vq_desc()
2605 if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) { in vhost_get_vq_desc()
2606 vq_err(vq, "Failed to read head: idx %d address %p\n", in vhost_get_vq_desc()
2608 &vq->avail->ring[last_avail_idx % vq->num]); in vhost_get_vq_desc()
2612 head = vhost16_to_cpu(vq, ring_head); in vhost_get_vq_desc()
2615 if (unlikely(head >= vq->num)) { in vhost_get_vq_desc()
2616 vq_err(vq, "Guest says index %u > %u is available", in vhost_get_vq_desc()
2617 head, vq->num); in vhost_get_vq_desc()
2629 if (unlikely(i >= vq->num)) { in vhost_get_vq_desc()
2630 vq_err(vq, "Desc index is %u > %u, head = %u", in vhost_get_vq_desc()
2631 i, vq->num, head); in vhost_get_vq_desc()
2634 if (unlikely(++found > vq->num)) { in vhost_get_vq_desc()
2635 vq_err(vq, "Loop detected: last one at %u " in vhost_get_vq_desc()
2637 i, vq->num, head); in vhost_get_vq_desc()
2640 ret = vhost_get_desc(vq, &desc, i); in vhost_get_vq_desc()
2642 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", in vhost_get_vq_desc()
2643 i, vq->desc + i); in vhost_get_vq_desc()
2646 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { in vhost_get_vq_desc()
2647 ret = get_indirect(vq, iov, iov_size, in vhost_get_vq_desc()
2652 vq_err(vq, "Failure detected " in vhost_get_vq_desc()
2659 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) in vhost_get_vq_desc()
2663 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), in vhost_get_vq_desc()
2664 vhost32_to_cpu(vq, desc.len), iov + iov_count, in vhost_get_vq_desc()
2668 vq_err(vq, "Translation failure %d descriptor idx %d\n", in vhost_get_vq_desc()
2677 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); in vhost_get_vq_desc()
2678 log[*log_num].len = vhost32_to_cpu(vq, desc.len); in vhost_get_vq_desc()
2685 vq_err(vq, "Descriptor has out after in: " in vhost_get_vq_desc()
2691 } while ((i = next_desc(vq, &desc)) != -1); in vhost_get_vq_desc()
2694 vq->last_avail_idx++; in vhost_get_vq_desc()
2698 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); in vhost_get_vq_desc()
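The sanity check at line 2585, (u16)(vq->avail_idx - last_avail_idx) > vq->num, leans on 16-bit modular arithmetic: both indices are free-running counters that wrap at 65536, so their u16 difference is exactly the number of new available entries even across the wrap, and anything larger than the queue size means the guest wrote garbage. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t num = 256;
    uint16_t last_avail_idx = 65530;
    uint16_t avail_idx = 4;            /* wrapped: 10 genuinely new entries */
    uint16_t pending;

    pending = (uint16_t)(avail_idx - last_avail_idx);
    printf("pending %u -> %s\n", pending, pending > num ? "bogus" : "ok");

    avail_idx = 300;                   /* claims 306 new entries, > num */
    pending = (uint16_t)(avail_idx - last_avail_idx);
    printf("pending %u -> %s\n", pending, pending > num ? "bogus" : "ok");
    return 0;
}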
2704 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) in vhost_discard_vq_desc() argument
2706 vq->last_avail_idx -= n; in vhost_discard_vq_desc()
2712 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) in vhost_add_used() argument
2715 cpu_to_vhost32(vq, head), in vhost_add_used()
2716 cpu_to_vhost32(vq, len) in vhost_add_used()
2719 return vhost_add_used_n(vq, &heads, 1); in vhost_add_used()
2723 static int __vhost_add_used_n(struct vhost_virtqueue *vq, in __vhost_add_used_n() argument
2731 start = vq->last_used_idx & (vq->num - 1); in __vhost_add_used_n()
2732 used = vq->used->ring + start; in __vhost_add_used_n()
2733 if (vhost_put_used(vq, heads, start, count)) { in __vhost_add_used_n()
2734 vq_err(vq, "Failed to write used"); in __vhost_add_used_n()
2737 if (unlikely(vq->log_used)) { in __vhost_add_used_n()
2741 log_used(vq, ((void __user *)used - (void __user *)vq->used), in __vhost_add_used_n()
2744 old = vq->last_used_idx; in __vhost_add_used_n()
2745 new = (vq->last_used_idx += count); in __vhost_add_used_n()
2750 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) in __vhost_add_used_n()
2751 vq->signalled_used_valid = false; in __vhost_add_used_n()
2757 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, in vhost_add_used_n() argument
2762 start = vq->last_used_idx & (vq->num - 1); in vhost_add_used_n()
2763 n = vq->num - start; in vhost_add_used_n()
2765 r = __vhost_add_used_n(vq, heads, n); in vhost_add_used_n()
2771 r = __vhost_add_used_n(vq, heads, count); in vhost_add_used_n()
2775 if (vhost_put_used_idx(vq)) { in vhost_add_used_n()
2776 vq_err(vq, "Failed to increment used idx"); in vhost_add_used_n()
2779 if (unlikely(vq->log_used)) { in vhost_add_used_n()
2783 log_used(vq, offsetof(struct vring_used, idx), in vhost_add_used_n()
2784 sizeof vq->used->idx); in vhost_add_used_n()
2785 if (vq->log_ctx) in vhost_add_used_n()
2786 eventfd_signal(vq->log_ctx, 1); in vhost_add_used_n()
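vhost_add_used_n() has to cope with the used ring being a power-of-two array indexed by last_used_idx & (num - 1): a batch that crosses the end of the array is written as two contiguous chunks, while last_used_idx itself keeps counting without masking. A mock-ring model of the split (the kernel writes through vhost_put_used() and logs each chunk; the demo just memcpy()s):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NUM 8                          /* queue size, a power of two */

static uint32_t ring[NUM];

static void add_used_n(uint16_t *last_used_idx, const uint32_t *heads,
                       unsigned count)
{
    unsigned start = *last_used_idx & (NUM - 1);
    unsigned n = NUM - start;          /* room before the wrap point */
    unsigned total = count;

    if (n < count) {                   /* chunk 1: up to the array end */
        memcpy(&ring[start], heads, n * sizeof(*heads));
        heads += n;
        count -= n;
        start = 0;                     /* chunk 2 restarts at slot 0 */
    }
    memcpy(&ring[start], heads, count * sizeof(*heads));
    *last_used_idx += total;           /* the index itself never masks */
}

int main(void)
{
    uint16_t last_used_idx = 6;        /* writes land in slots 6,7,0,1 */
    uint32_t heads[4] = { 100, 101, 102, 103 };

    add_used_n(&last_used_idx, heads, 4);
    for (unsigned i = 0; i < NUM; i++)
        printf("slot %u: %u\n", i, ring[i]);
    printf("last_used_idx = %u\n", last_used_idx);   /* 10 */
    return 0;
}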
2792 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_notify() argument
2802 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && in vhost_notify()
2803 unlikely(vq->avail_idx == vq->last_avail_idx)) in vhost_notify()
2806 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_notify()
2808 if (vhost_get_avail_flags(vq, &flags)) { in vhost_notify()
2809 vq_err(vq, "Failed to get flags"); in vhost_notify()
2812 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); in vhost_notify()
2814 old = vq->signalled_used; in vhost_notify()
2815 v = vq->signalled_used_valid; in vhost_notify()
2816 new = vq->signalled_used = vq->last_used_idx; in vhost_notify()
2817 vq->signalled_used_valid = true; in vhost_notify()
2822 if (vhost_get_used_event(vq, &event)) { in vhost_notify()
2823 vq_err(vq, "Failed to get used event idx"); in vhost_notify()
2826 return vring_need_event(vhost16_to_cpu(vq, event), new, old); in vhost_notify()
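With VIRTIO_RING_F_EVENT_IDX, vhost_notify() signals the guest only when the guest's used_event index fell inside the window (old, new] of entries consumed since the last signal; the test at line 2826 is vring_need_event() from include/uapi/linux/virtio_ring.h, whose whole trick is one u16 comparison. A standalone check of its behavior:

#include <stdio.h>
#include <stdint.h>

/* Same expression as vring_need_event(): true iff event is in
 * (old_idx, new_idx] under 16-bit modular arithmetic. */
static int need_event(uint16_t event, uint16_t new_idx, uint16_t old_idx)
{
    return (uint16_t)(new_idx - event - 1) < (uint16_t)(new_idx - old_idx);
}

int main(void)
{
    uint16_t event = 10;               /* guest: "wake me after entry 10" */

    printf("old=8,  new=9  -> %d (10 not reached yet)\n", need_event(event, 9, 8));
    printf("old=9,  new=11 -> %d (crossed 10: signal)\n", need_event(event, 11, 9));
    printf("old=11, new=12 -> %d (already past 10)\n",    need_event(event, 12, 11));
    return 0;
}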
2830 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_signal() argument
2833 if (vq->call_ctx.ctx && vhost_notify(dev, vq)) in vhost_signal()
2834 eventfd_signal(vq->call_ctx.ctx, 1); in vhost_signal()
2840 struct vhost_virtqueue *vq, in vhost_add_used_and_signal() argument
2843 vhost_add_used(vq, head, len); in vhost_add_used_and_signal()
2844 vhost_signal(dev, vq); in vhost_add_used_and_signal()
2850 struct vhost_virtqueue *vq, in vhost_add_used_and_signal_n() argument
2853 vhost_add_used_n(vq, heads, count); in vhost_add_used_and_signal_n()
2854 vhost_signal(dev, vq); in vhost_add_used_and_signal_n()
2859 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_vq_avail_empty() argument
2864 if (vq->avail_idx != vq->last_avail_idx) in vhost_vq_avail_empty()
2867 r = vhost_get_avail_idx(vq, &avail_idx); in vhost_vq_avail_empty()
2871 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_vq_avail_empty()
2872 if (vq->avail_idx != vq->last_avail_idx) { in vhost_vq_avail_empty()
2887 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_enable_notify() argument
2892 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) in vhost_enable_notify()
2894 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; in vhost_enable_notify()
2895 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_enable_notify()
2896 r = vhost_update_used_flags(vq); in vhost_enable_notify()
2898 vq_err(vq, "Failed to enable notification at %p: %d\n", in vhost_enable_notify()
2899 &vq->used->flags, r); in vhost_enable_notify()
2903 r = vhost_update_avail_event(vq); in vhost_enable_notify()
2905 vq_err(vq, "Failed to update avail event index at %p: %d\n", in vhost_enable_notify()
2906 vhost_avail_event(vq), r); in vhost_enable_notify()
2913 r = vhost_get_avail_idx(vq, &avail_idx); in vhost_enable_notify()
2915 vq_err(vq, "Failed to check avail idx at %p: %d\n", in vhost_enable_notify()
2916 &vq->avail->idx, r); in vhost_enable_notify()
2920 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_enable_notify()
2921 if (vq->avail_idx != vq->last_avail_idx) { in vhost_enable_notify()
2936 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_disable_notify() argument
2940 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) in vhost_disable_notify()
2942 vq->used_flags |= VRING_USED_F_NO_NOTIFY; in vhost_disable_notify()
2943 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_disable_notify()
2944 r = vhost_update_used_flags(vq); in vhost_disable_notify()
2946 vq_err(vq, "Failed to disable notification at %p: %d\n", in vhost_disable_notify()
2947 &vq->used->flags, r); in vhost_disable_notify()
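vhost_enable_notify() closes a classic sleep/wakeup race: after re-arming guest kicks (clearing VRING_USED_F_NO_NOTIFY, or publishing avail_event under EVENT_IDX), it re-reads avail_idx; if buffers slipped in while kicks were disabled, it returns true so the caller polls once more instead of sleeping on a kick that will never arrive. A toy model of the re-check idiom (state and helper are invented; the kernel issues a memory barrier between the re-arm and the re-read):

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

struct demo_vq {
    uint16_t avail_idx;                /* last snapshot of the guest index */
    uint16_t last_avail_idx;           /* how far we have consumed */
    bool     kicks_enabled;
};

static uint16_t read_guest_avail_idx(void)
{
    return 7;                          /* pretend the guest queued more */
}

static bool enable_notify(struct demo_vq *vq)
{
    vq->kicks_enabled = true;          /* kernel: update flags/avail_event */
    /* kernel: smp_mb() here, paired with the guest side */
    vq->avail_idx = read_guest_avail_idx();
    return vq->avail_idx != vq->last_avail_idx;   /* true: keep polling */
}

int main(void)
{
    struct demo_vq vq = { .avail_idx = 5, .last_avail_idx = 5 };

    printf("more work after re-arm: %s\n", enable_notify(&vq) ? "yes" : "no");
    return 0;
}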
2953 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) in vhost_new_msg() argument
2960 node->vq = vq; in vhost_new_msg()
2996 struct vhost_virtqueue *vq; in vhost_set_backend_features() local
3001 vq = dev->vqs[i]; in vhost_set_backend_features()
3002 mutex_lock(&vq->mutex); in vhost_set_backend_features()
3003 vq->acked_backend_features = features; in vhost_set_backend_features()
3004 mutex_unlock(&vq->mutex); in vhost_set_backend_features()