Lines matching "disable vhost crypto" in hw/virtio/vhost-user.c

2  * vhost-user
7 * See the COPYING file in the top-level directory.
13 #include "hw/virtio/virtio-dmabuf.h"
14 #include "hw/virtio/vhost.h"
15 #include "hw/virtio/virtio-crypto.h"
16 #include "hw/virtio/vhost-user.h"
17 #include "hw/virtio/vhost-backend.h"
19 #include "hw/virtio/virtio-net.h"
20 #include "chardev/char-fe.h"
21 #include "io/channel-socket.h"
23 #include "qemu/error-report.h"
24 #include "qemu/main-loop.h"
29 #include "migration/postcopy-ram.h"
37 #include "standard-headers/linux/vhost_types.h"
60 #define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
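The mask above keeps only the protocol-feature bits QEMU knows about, i.e. bits below VHOST_USER_PROTOCOL_F_MAX. A minimal sketch of the negotiation step this enables, with illustrative names and an illustrative F_MAX value (the real one comes from the enum in this file):

#include <stdint.h>

/* Illustrative stand-ins for the VhostUserProtocolFeature enum values. */
#define DEMO_PROTOCOL_F_MAX 18
#define DEMO_PROTOCOL_FEATURE_MASK ((1ULL << DEMO_PROTOCOL_F_MAX) - 1)

/* Keep only the backend-offered protocol features QEMU understands. */
static uint64_t demo_negotiate_protocol_features(uint64_t backend_offered)
{
    return backend_offered & DEMO_PROTOCOL_FEATURE_MASK;
}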
169 /* session id for success, -1 on errors */
244 /* Shared between vhost devs of the same virtio device */
257 * vhost region.
277 struct vhost_user *u = dev->opaque; in vhost_user_read_header()
278 CharBackend *chr = u->user->chr; in vhost_user_read_header()
286 " Original request %d.", r, size, msg->hdr.request); in vhost_user_read_header()
287 return r < 0 ? -saved_errno : -EIO; in vhost_user_read_header()
291 if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) { in vhost_user_read_header()
293 " Flags 0x%x instead of 0x%x.", msg->hdr.flags, in vhost_user_read_header()
295 return -EPROTO; in vhost_user_read_header()
298 trace_vhost_user_read(msg->hdr.request, msg->hdr.flags); in vhost_user_read_header()
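For orientation, a sketch of the wire header that vhost_user_read_header() validates: three little-endian 32-bit fields, per the vhost-user protocol specification. The struct and macro names here are illustrative, not the QEMU ones.

#include <stdint.h>

struct demo_vhost_user_hdr {
    uint32_t request;  /* VHOST_USER_* request code */
    uint32_t flags;    /* bits 0-1: version; bit 2: reply; bit 3: need-reply */
    uint32_t size;     /* size of the payload that follows the header */
};

#define DEMO_VERSION    0x1u
#define DEMO_REPLY_MASK (0x1u << 2)

/* A valid reply carries exactly the version plus the reply bit,
 * mirroring the flags check in the fragment above. */
static int demo_check_reply_header(const struct demo_vhost_user_hdr *hdr)
{
    return hdr->flags == (DEMO_REPLY_MASK | DEMO_VERSION) ? 0 : -1;
}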
305 struct vhost_user *u = dev->opaque; in vhost_user_read()
306 CharBackend *chr = u->user->chr; in vhost_user_read()
316 if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) { in vhost_user_read()
318 " Size %d exceeds the maximum %zu.", msg->hdr.size, in vhost_user_read()
320 return -EPROTO; in vhost_user_read()
323 if (msg->hdr.size) { in vhost_user_read()
325 size = msg->hdr.size; in vhost_user_read()
330 " Read %d instead of %d.", r, msg->hdr.size); in vhost_user_read()
331 return r < 0 ? -saved_errno : -EIO; in vhost_user_read()
344 if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) { in process_message_reply()
353 if (msg_reply.hdr.request != msg->hdr.request) { in process_message_reply()
356 msg->hdr.request, msg_reply.hdr.request); in process_message_reply()
357 return -EPROTO; in process_message_reply()
360 return msg_reply.payload.u64 ? -EIO : 0; in process_message_reply()
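process_message_reply() implements the REPLY_ACK convention: when a message carried the need-reply bit, the backend answers with a u64 payload where zero means success. A hedged sketch of that check, with illustrative names:

#include <errno.h>
#include <stdint.h>

/* Convert a REPLY_ACK payload into an errno-style result. The spec only
 * defines zero as success, so any non-zero ack maps to -EIO, exactly as
 * in process_message_reply() above; a mismatched request is a protocol
 * error, matching the -EPROTO path. */
static int demo_reply_ack_to_errno(uint32_t sent_req, uint32_t reply_req,
                                   uint64_t ack)
{
    if (sent_req != reply_req) {
        return -EPROTO;    /* reply refers to a different request */
    }
    return ack ? -EIO : 0;
}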
381 /* most non-init callers ignore the error */
385 struct vhost_user *u = dev->opaque; in vhost_user_write()
386 CharBackend *chr = u->user->chr; in vhost_user_write()
387 int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size; in vhost_user_write()
390 * Some devices, like virtio-scsi, are implemented as a single vhost_dev, in vhost_user_write()
391 * while others, like virtio-net, contain multiple vhost_devs. For in vhost_user_write()
394 * vhost-user messages should only be sent once. in vhost_user_write()
396 * Devices with multiple vhost_devs are given an associated dev->vq_index in vhost_user_write()
399 if (vhost_user_per_device_request(msg->hdr.request) in vhost_user_write()
400 && dev->vq_index != 0) { in vhost_user_write()
401 msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK; in vhost_user_write()
407 return -EINVAL; in vhost_user_write()
415 return ret < 0 ? -saved_errno : -EIO; in vhost_user_write()
418 trace_vhost_user_write(msg->hdr.request, msg->hdr.flags); in vhost_user_write()
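A self-contained model of the gating described in the vhost_user_write() comment above: device-scoped requests must reach the backend once per virtio device, so only the vhost_dev owning vq_index 0 sends them, and the others drop the request along with its need-reply bit. Names are illustrative; the request codes follow the vhost-user spec.

#include <stdbool.h>
#include <stdint.h>

struct demo_dev { int vq_index; };

/* Stand-in for vhost_user_per_device_request(). */
static bool demo_is_per_device_request(uint32_t req)
{
    return req == 5 /* SET_MEM_TABLE */ || req == 6 /* SET_LOG_BASE */;
}

/* Only the first vhost_dev of a multi-queue device sends these. */
static bool demo_should_send(const struct demo_dev *dev, uint32_t req)
{
    return !demo_is_per_device_request(req) || dev->vq_index == 0;
}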
438 bool shmfd = virtio_has_feature(dev->protocol_features, in vhost_user_set_log_base()
444 .payload.log.mmap_size = log->size * sizeof(*(log->log)), in vhost_user_set_log_base()
450 if (dev->vq_index != 0) { in vhost_user_set_log_base()
454 if (shmfd && log->fd != -1) { in vhost_user_set_log_base()
455 fds[fd_num++] = log->fd; in vhost_user_set_log_base()
474 return -EPROTO; in vhost_user_set_log_base()
489 *offset += mr->ram_block->fd_offset; in vhost_user_get_mr_data()
499 dst->userspace_addr = src->userspace_addr; in vhost_user_fill_msg_region()
500 dst->memory_size = src->memory_size; in vhost_user_fill_msg_region()
501 dst->guest_phys_addr = src->guest_phys_addr; in vhost_user_fill_msg_region()
502 dst->mmap_offset = mmap_offset; in vhost_user_fill_msg_region()
517 msg->hdr.request = VHOST_USER_SET_MEM_TABLE; in vhost_user_fill_set_mem_table_msg()
519 for (i = 0; i < dev->mem->nregions; ++i) { in vhost_user_fill_set_mem_table_msg()
520 reg = dev->mem->regions + i; in vhost_user_fill_set_mem_table_msg()
522 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in vhost_user_fill_set_mem_table_msg()
526 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name, in vhost_user_fill_set_mem_table_msg()
527 reg->memory_size, in vhost_user_fill_set_mem_table_msg()
528 reg->guest_phys_addr, in vhost_user_fill_set_mem_table_msg()
529 reg->userspace_addr, in vhost_user_fill_set_mem_table_msg()
531 u->region_rb_offset[i] = offset; in vhost_user_fill_set_mem_table_msg()
532 u->region_rb[i] = mr->ram_block; in vhost_user_fill_set_mem_table_msg()
534 error_report("Failed preparing vhost-user memory table msg"); in vhost_user_fill_set_mem_table_msg()
535 return -ENOBUFS; in vhost_user_fill_set_mem_table_msg()
538 msg->payload.memory.regions[*fd_num] = region_buffer; in vhost_user_fill_set_mem_table_msg()
541 u->region_rb_offset[i] = 0; in vhost_user_fill_set_mem_table_msg()
542 u->region_rb[i] = NULL; in vhost_user_fill_set_mem_table_msg()
546 msg->payload.memory.nregions = *fd_num; in vhost_user_fill_set_mem_table_msg()
549 error_report("Failed initializing vhost-user memory map, " in vhost_user_fill_set_mem_table_msg()
550 "consider using -object memory-backend-file share=on"); in vhost_user_fill_set_mem_table_msg()
551 return -EINVAL; in vhost_user_fill_set_mem_table_msg()
554 msg->hdr.size = sizeof(msg->payload.memory.nregions); in vhost_user_fill_set_mem_table_msg()
555 msg->hdr.size += sizeof(msg->payload.memory.padding); in vhost_user_fill_set_mem_table_msg()
556 msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion); in vhost_user_fill_set_mem_table_msg()
564 return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr && in reg_equal()
565 shadow_reg->userspace_addr == vdev_reg->userspace_addr && in reg_equal()
566 shadow_reg->memory_size == vdev_reg->memory_size; in reg_equal()
576 struct vhost_user *u = dev->opaque; in scrub_shadow_regions()
590 for (i = 0; i < u->num_shadow_regions; i++) { in scrub_shadow_regions()
591 shadow_reg = &u->shadow_regions[i]; in scrub_shadow_regions()
594 for (j = 0; j < dev->mem->nregions; j++) { in scrub_shadow_regions()
595 reg = &dev->mem->regions[j]; in scrub_shadow_regions()
597 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in scrub_shadow_regions()
608 u->region_rb_offset[j] = offset; in scrub_shadow_regions()
609 u->region_rb[j] = mr->ram_block; in scrub_shadow_regions()
610 shadow_pcb[j] = u->postcopy_client_bases[i]; in scrub_shadow_regions()
612 u->region_rb_offset[j] = 0; in scrub_shadow_regions()
613 u->region_rb[j] = NULL; in scrub_shadow_regions()
636 for (i = 0; i < dev->mem->nregions; i++) { in scrub_shadow_regions()
637 reg = &dev->mem->regions[i]; in scrub_shadow_regions()
638 vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in scrub_shadow_regions()
666 struct vhost_user *u = dev->opaque; in send_remove_regions()
677 for (i = nr_rem_reg - 1; i >= 0; i--) { in send_remove_regions()
681 vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd); in send_remove_regions()
684 msg->hdr.request = VHOST_USER_REM_MEM_REG; in send_remove_regions()
686 msg->payload.mem_reg.region = region_buffer; in send_remove_regions()
705 memmove(&u->shadow_regions[shadow_reg_idx], in send_remove_regions()
706 &u->shadow_regions[shadow_reg_idx + 1], in send_remove_regions()
708 (u->num_shadow_regions - shadow_reg_idx - 1)); in send_remove_regions()
709 u->num_shadow_regions--; in send_remove_regions()
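A self-contained version of the compaction above: removing shadow region i from a dense array of n entries slides the tail down one slot and shrinks the count, which is exactly what the memmove() in send_remove_regions() does. Names are illustrative.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t demo_array_remove_at(uint64_t *arr, size_t n, size_t i)
{
    /* Assumes i < n: slide elements [i+1, n) down to [i, n-1). */
    memmove(&arr[i], &arr[i + 1], sizeof(arr[0]) * (n - i - 1));
    return n - 1;
}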
720 struct vhost_user *u = dev->opaque; in send_add_regions()
733 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); in send_add_regions()
737 trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name, in send_add_regions()
738 reg->memory_size, in send_add_regions()
739 reg->guest_phys_addr, in send_add_regions()
740 reg->userspace_addr, in send_add_regions()
742 u->region_rb_offset[reg_idx] = offset; in send_add_regions()
743 u->region_rb[reg_idx] = mr->ram_block; in send_add_regions()
745 msg->hdr.request = VHOST_USER_ADD_MEM_REG; in send_add_regions()
747 msg->payload.mem_reg.region = region_buffer; in send_add_regions()
769 return -EPROTO; in send_add_regions()
776 if (msg_reply.hdr.size != msg->hdr.size) { in send_add_regions()
779 msg->hdr.size); in send_add_regions()
780 return -EPROTO; in send_add_regions()
784 if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) { in send_add_regions()
789 msg->payload.mem_reg.region.userspace_addr, in send_add_regions()
795 dev->mem->regions[reg_idx].guest_phys_addr); in send_add_regions()
796 return -EPROTO; in send_add_regions()
805 u->region_rb_offset[reg_idx] = 0; in send_add_regions()
806 u->region_rb[reg_idx] = NULL; in send_add_regions()
815 u->shadow_regions[u->num_shadow_regions].guest_phys_addr = in send_add_regions()
816 reg->guest_phys_addr; in send_add_regions()
817 u->shadow_regions[u->num_shadow_regions].userspace_addr = in send_add_regions()
818 reg->userspace_addr; in send_add_regions()
819 u->shadow_regions[u->num_shadow_regions].memory_size = in send_add_regions()
820 reg->memory_size; in send_add_regions()
821 u->num_shadow_regions++; in send_add_regions()
832 struct vhost_user *u = dev->opaque; in vhost_user_add_remove_regions()
839 msg->hdr.size = sizeof(msg->payload.mem_reg); in vhost_user_add_remove_regions()
862 memcpy(u->postcopy_client_bases, shadow_pcb, in vhost_user_add_remove_regions()
870 msg->hdr.size = sizeof(msg->payload.u64); in vhost_user_add_remove_regions()
871 msg->payload.u64 = 0; /* OK */ in vhost_user_add_remove_regions()
883 memcpy(u->postcopy_client_bases, shadow_pcb, in vhost_user_add_remove_regions()
895 struct vhost_user *u = dev->opaque; in vhost_user_set_mem_table_postcopy()
906 if (u->region_rb_len < dev->mem->nregions) { in vhost_user_set_mem_table_postcopy()
907 u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions); in vhost_user_set_mem_table_postcopy()
908 u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset, in vhost_user_set_mem_table_postcopy()
909 dev->mem->nregions); in vhost_user_set_mem_table_postcopy()
910 memset(&(u->region_rb[u->region_rb_len]), '\0', in vhost_user_set_mem_table_postcopy()
911 sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len)); in vhost_user_set_mem_table_postcopy()
912 memset(&(u->region_rb_offset[u->region_rb_len]), '\0', in vhost_user_set_mem_table_postcopy()
913 sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len)); in vhost_user_set_mem_table_postcopy()
914 u->region_rb_len = dev->mem->nregions; in vhost_user_set_mem_table_postcopy()
943 return -EPROTO; in vhost_user_set_mem_table_postcopy()
954 return -EPROTO; in vhost_user_set_mem_table_postcopy()
957 memset(u->postcopy_client_bases, 0, in vhost_user_set_mem_table_postcopy()
966 region_i < dev->mem->nregions; in vhost_user_set_mem_table_postcopy()
970 dev->mem->regions[region_i].guest_phys_addr) { in vhost_user_set_mem_table_postcopy()
971 u->postcopy_client_bases[region_i] = in vhost_user_set_mem_table_postcopy()
984 return -EIO; in vhost_user_set_mem_table_postcopy()
1007 struct vhost_user *u = dev->opaque; in vhost_user_set_mem_table()
1010 bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler; in vhost_user_set_mem_table()
1011 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_set_mem_table()
1014 virtio_has_feature(dev->protocol_features, in vhost_user_set_mem_table()
1063 bool cross_endian = virtio_has_feature(dev->protocol_features, in vhost_user_set_vring_endian()
1073 error_report("vhost-user trying to send unhandled ioctl"); in vhost_user_set_vring_endian()
1074 return -ENOTSUP; in vhost_user_set_vring_endian()
1088 if (vhost_user_per_device_request(request) && dev->vq_index != 0) { in vhost_user_get_u64()
1105 return -EPROTO; in vhost_user_get_u64()
1110 return -EPROTO; in vhost_user_get_u64()
1121 return -EPROTO; in vhost_user_get_features()
1127 /* Note: "msg->hdr.flags" may be modified. */
1134 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_write_sync()
1137 msg->hdr.flags |= VHOST_USER_NEED_REPLY_MASK; in vhost_user_write_sync()
1149 if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) { in vhost_user_write_sync()
1188 if (n->unmap_addr) { in vhost_user_host_notifier_free()
1189 munmap(n->unmap_addr, qemu_real_host_page_size()); in vhost_user_host_notifier_free()
1190 n->unmap_addr = NULL; in vhost_user_host_notifier_free()
1192 if (n->destroy) { in vhost_user_host_notifier_free()
1194 object_unparent(OBJECT(&n->mr)); in vhost_user_host_notifier_free()
1201  * clean-up function for the notifier; it will finally free the structure
1208 * if destroy == false and n->addr == NULL, we have nothing to do. in vhost_user_host_notifier_remove()
1211 if (!n || (!destroy && !n->addr)) { in vhost_user_host_notifier_remove()
1215 if (n->addr) { in vhost_user_host_notifier_remove()
1218 virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false); in vhost_user_host_notifier_remove()
1221 assert(!n->unmap_addr); in vhost_user_host_notifier_remove()
1222 n->unmap_addr = n->addr; in vhost_user_host_notifier_remove()
1223 n->addr = NULL; in vhost_user_host_notifier_remove()
1225 n->destroy = destroy; in vhost_user_host_notifier_remove()
1239 if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) { in vhost_user_set_vring_enable()
1240 return -EINVAL; in vhost_user_set_vring_enable()
1243 for (i = 0; i < dev->nvqs; ++i) { in vhost_user_set_vring_enable()
1246 .index = dev->vq_index + i, in vhost_user_set_vring_enable()
1251 * SET_VRING_ENABLE travels from guest to QEMU to vhost-user backend / in vhost_user_set_vring_enable()
1253 * from guest to vhost-user backend / data plane thread via eventfd. in vhost_user_set_vring_enable()
1259 * seemingly disabled queue). To prevent this out-of-order delivery, in vhost_user_set_vring_enable()
1261 * backend control plane acknowledges enabling the queue -- IOW, pass in vhost_user_set_vring_enable()
1269 * the device-level recovery. in vhost_user_set_vring_enable()
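A sketch of the ordering fix this comment describes: the enable travels over the control socket while kicks travel over an eventfd, two channels with no mutual ordering, so QEMU waits for the backend's ack on the socket before any eventfd write can follow. The helper names here are hypothetical stand-ins for the message path quoted above; the request code follows the vhost-user spec.

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical control-plane helpers. */
int demo_send_ctrl(int ctrl_sock, uint32_t request, bool need_reply);
int demo_wait_for_ack(int ctrl_sock);

#define DEMO_SET_VRING_ENABLE 18  /* request code per the vhost-user spec */

/* Enable the ring synchronously, then kick: once the ack arrives on the
 * control socket, the backend has processed the enable, so the eventfd
 * write cannot be observed against a still-disabled queue. */
static int demo_enable_then_kick(int ctrl_sock, int kick_eventfd)
{
    uint64_t one = 1;

    if (demo_send_ctrl(ctrl_sock, DEMO_SET_VRING_ENABLE, true) < 0 ||
        demo_wait_for_ack(ctrl_sock) < 0) {
        return -1;
    }
    return write(kick_eventfd, &one, sizeof(one)) == sizeof(one) ? 0 : -1;
}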
1281 if (idx >= u->notifiers->len) { in fetch_notifier()
1284 return g_ptr_array_index(u->notifiers, idx); in fetch_notifier()
1297 struct vhost_user *u = dev->opaque; in vhost_user_get_vring_base()
1299 VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index); in vhost_user_get_vring_base()
1300 vhost_user_host_notifier_remove(n, dev->vdev, false); in vhost_user_get_vring_base()
1315 return -EPROTO; in vhost_user_get_vring_base()
1320 return -EPROTO; in vhost_user_get_vring_base()
1337 .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK, in vhost_set_vring_file()
1341 if (file->fd > 0) { in vhost_set_vring_file()
1342 fds[fd_num++] = file->fd; in vhost_set_vring_file()
1382 bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG); in vhost_user_set_vring_addr()
1454 features | dev->backend_features, in vhost_user_set_features()
1457 if (virtio_has_feature(dev->protocol_features, in vhost_user_set_features()
1512 if (!virtio_has_feature(dev->protocol_features, in vhost_user_reset_device()
1514 return -ENOSYS; in vhost_user_reset_device()
1522 if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) { in vhost_user_backend_handle_config_change()
1523 return -ENOSYS; in vhost_user_backend_handle_config_change()
1526 return dev->config_ops->vhost_dev_config_notifier(dev); in vhost_user_backend_handle_config_change()
1537 if (idx >= u->notifiers->len) { in fetch_or_create_notifier()
1538 g_ptr_array_set_size(u->notifiers, idx + 1); in fetch_or_create_notifier()
1541 n = g_ptr_array_index(u->notifiers, idx); in fetch_or_create_notifier()
1544  * In case notifications arrive out-of-order, in fetch_or_create_notifier()
1547 g_ptr_array_remove_index(u->notifiers, idx); in fetch_or_create_notifier()
1549 n->idx = idx; in fetch_or_create_notifier()
1550 g_ptr_array_insert(u->notifiers, idx, n); in fetch_or_create_notifier()
1561 int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK; in vhost_user_backend_handle_vring_host_notifier()
1563 struct vhost_user *u = dev->opaque; in vhost_user_backend_handle_vring_host_notifier()
1564 VhostUserState *user = u->user; in vhost_user_backend_handle_vring_host_notifier()
1565 VirtIODevice *vdev = dev->vdev; in vhost_user_backend_handle_vring_host_notifier()
1570 if (!virtio_has_feature(dev->protocol_features, in vhost_user_backend_handle_vring_host_notifier()
1573 return -EINVAL; in vhost_user_backend_handle_vring_host_notifier()
1583 if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { in vhost_user_backend_handle_vring_host_notifier()
1588 if (area->size != page_size) { in vhost_user_backend_handle_vring_host_notifier()
1589 return -EINVAL; in vhost_user_backend_handle_vring_host_notifier()
1593 fd, area->offset); in vhost_user_backend_handle_vring_host_notifier()
1595 return -EFAULT; in vhost_user_backend_handle_vring_host_notifier()
1598 name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]", in vhost_user_backend_handle_vring_host_notifier()
1600 if (!n->mr.ram) { /* Don't init again after suspend. */ in vhost_user_backend_handle_vring_host_notifier()
1601 memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, in vhost_user_backend_handle_vring_host_notifier()
1604 n->mr.ram_block->host = addr; in vhost_user_backend_handle_vring_host_notifier()
1608 if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) { in vhost_user_backend_handle_vring_host_notifier()
1609 object_unparent(OBJECT(&n->mr)); in vhost_user_backend_handle_vring_host_notifier()
1611 return -ENXIO; in vhost_user_backend_handle_vring_host_notifier()
1614 n->addr = addr; in vhost_user_backend_handle_vring_host_notifier()
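A sketch of the mapping step above: the backend hands QEMU an fd plus an offset, and exactly one host page of it is mapped as the queue's doorbell region. The size check and error paths mirror the -EINVAL/-EFAULT returns in the fragments; the function name is illustrative.

#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

static void *demo_map_notifier_page(int fd, uint64_t offset, uint64_t size)
{
    long page_size = sysconf(_SC_PAGESIZE);

    if (page_size <= 0 || size != (uint64_t)page_size) {
        return NULL;                           /* -EINVAL in the real code */
    }
    void *addr = mmap(NULL, (size_t)page_size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, (off_t)offset);
    return addr == MAP_FAILED ? NULL : addr;   /* NULL maps to -EFAULT */
}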
1625 memcpy(uuid.data, object->uuid, sizeof(object->uuid)); in vhost_user_backend_handle_shared_object_add()
1640 memcpy(uuid.data, object->uuid, sizeof(object->uuid)); in vhost_user_backend_handle_shared_object_remove()
1646 /* Not allowed to remove non-owned entries */ in vhost_user_backend_handle_shared_object_remove()
1652 /* Not allowed to remove non-owned entries */ in vhost_user_backend_handle_shared_object_remove()
1664 { .iov_base = payload, .iov_len = hdr->size }, in vhost_user_send_resp()
1667 hdr->flags &= ~VHOST_USER_NEED_REPLY_MASK; in vhost_user_send_resp()
1668 hdr->flags |= VHOST_USER_REPLY_MASK; in vhost_user_send_resp()
1677 hdr->size = sizeof(payload->u64); in vhost_user_backend_send_dmabuf_fd()
1684 struct vhost_user *u = dev->opaque; in vhost_user_get_shared_object()
1685 CharBackend *chr = u->user->chr; in vhost_user_get_shared_object()
1707 return -EPROTO; in vhost_user_get_shared_object()
1713 return -EIO; in vhost_user_get_shared_object()
1726 CharBackend *chr = u->user->chr; in vhost_user_backend_handle_shared_object_lookup()
1728 int dmabuf_fd = -1; in vhost_user_backend_handle_shared_object_lookup()
1731 memcpy(uuid.data, payload->object.uuid, sizeof(payload->object.uuid)); in vhost_user_backend_handle_shared_object_lookup()
1733 payload->u64 = 0; in vhost_user_backend_handle_shared_object_lookup()
1742 payload->u64 = -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1747 payload->u64 = ret; in vhost_user_backend_handle_shared_object_lookup()
1752 payload->u64 = -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1756 if (dmabuf_fd != -1) { in vhost_user_backend_handle_shared_object_lookup()
1762 payload->u64 = -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1767 return -EINVAL; in vhost_user_backend_handle_shared_object_lookup()
1775 g_source_destroy(u->backend_src); in close_backend_channel()
1776 g_source_unref(u->backend_src); in close_backend_channel()
1777 u->backend_src = NULL; in close_backend_channel()
1778 object_unref(OBJECT(u->backend_ioc)); in close_backend_channel()
1779 u->backend_ioc = NULL; in close_backend_channel()
1786 struct vhost_user *u = dev->opaque; in backend_read()
1828 fd ? fd[0] : -1); in backend_read()
1838 ret = vhost_user_backend_handle_shared_object_lookup(dev->opaque, ioc, in backend_read()
1843 ret = -EINVAL; in backend_read()
1881 struct vhost_user *u = dev->opaque; in vhost_setup_backend_channel()
1883 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_setup_backend_channel()
1888 if (!virtio_has_feature(dev->protocol_features, in vhost_setup_backend_channel()
1893 if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { in vhost_setup_backend_channel()
1896 return -saved_errno; in vhost_setup_backend_channel()
1902 return -ECONNREFUSED; in vhost_setup_backend_channel()
1904 u->backend_ioc = ioc; in vhost_setup_backend_channel()
1905 u->backend_src = qio_channel_add_watch_source(u->backend_ioc, in vhost_setup_backend_channel()
1940 struct vhost_dev *dev = pcfd->data; in vhost_user_postcopy_fault_handler()
1941 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_fault_handler()
1943 uint64_t faultaddr = msg->arg.pagefault.address; in vhost_user_postcopy_fault_handler()
1948 trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr, in vhost_user_postcopy_fault_handler()
1949 dev->mem->nregions); in vhost_user_postcopy_fault_handler()
1950 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) { in vhost_user_postcopy_fault_handler()
1952 u->postcopy_client_bases[i], dev->mem->regions[i].memory_size); in vhost_user_postcopy_fault_handler()
1953 if (faultaddr >= u->postcopy_client_bases[i]) { in vhost_user_postcopy_fault_handler()
1954  /* Offset of the fault address in the vhost region */ in vhost_user_postcopy_fault_handler()
1955 uint64_t region_offset = faultaddr - u->postcopy_client_bases[i]; in vhost_user_postcopy_fault_handler()
1956 if (region_offset < dev->mem->regions[i].memory_size) { in vhost_user_postcopy_fault_handler()
1957 rb_offset = region_offset + u->region_rb_offset[i]; in vhost_user_postcopy_fault_handler()
1960 rb = u->region_rb[i]; in vhost_user_postcopy_fault_handler()
1968 return -1; in vhost_user_postcopy_fault_handler()
1974 struct vhost_dev *dev = pcfd->data; in vhost_user_postcopy_waker()
1975 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_waker()
1984 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) { in vhost_user_postcopy_waker()
1985 if (u->region_rb[i] == rb && in vhost_user_postcopy_waker()
1986 offset >= u->region_rb_offset[i] && in vhost_user_postcopy_waker()
1987 offset < (u->region_rb_offset[i] + in vhost_user_postcopy_waker()
1988 dev->mem->regions[i].memory_size)) { in vhost_user_postcopy_waker()
1989 uint64_t client_addr = (offset - u->region_rb_offset[i]) + in vhost_user_postcopy_waker()
1990 u->postcopy_client_bases[i]; in vhost_user_postcopy_waker()
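An illustrative version of the translation in vhost_user_postcopy_waker(): a page to wake is identified by (RAMBlock, offset), and the region covering that offset maps it back to the backend's client-side virtual address. Struct and function names are stand-ins for the shadow bookkeeping shown above.

#include <stddef.h>
#include <stdint.h>

struct demo_region {
    uint64_t rb_offset;     /* region's offset within its RAMBlock */
    uint64_t size;          /* region size in bytes */
    uint64_t client_base;   /* backend's mapping base for the region */
};

static int demo_to_client_addr(const struct demo_region *r, size_t n,
                               uint64_t offset, uint64_t *client_addr)
{
    for (size_t i = 0; i < n; i++) {
        if (offset >= r[i].rb_offset &&
            offset < r[i].rb_offset + r[i].size) {
            *client_addr = (offset - r[i].rb_offset) + r[i].client_base;
            return 0;   /* found: wake this page on the client side */
        }
    }
    return -1;          /* offset not covered by any shared region */
}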
2008 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_advise()
2009 CharBackend *chr = u->user->chr; in vhost_user_postcopy_advise()
2019 error_setg(errp, "Failed to send postcopy_advise to vhost"); in vhost_user_postcopy_advise()
2025 error_setg(errp, "Failed to get postcopy_advise reply from vhost"); in vhost_user_postcopy_advise()
2032 return -EPROTO; in vhost_user_postcopy_advise()
2037 return -EPROTO; in vhost_user_postcopy_advise()
2042 return -EIO; in vhost_user_postcopy_advise()
2047 u->postcopy_fd.fd = ufd; in vhost_user_postcopy_advise()
2048 u->postcopy_fd.data = dev; in vhost_user_postcopy_advise()
2049 u->postcopy_fd.handler = vhost_user_postcopy_fault_handler; in vhost_user_postcopy_advise()
2050 u->postcopy_fd.waker = vhost_user_postcopy_waker; in vhost_user_postcopy_advise()
2051 u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */ in vhost_user_postcopy_advise()
2052 postcopy_register_shared_ufd(&u->postcopy_fd); in vhost_user_postcopy_advise()
2055 error_setg(errp, "Postcopy not supported on non-Linux systems"); in vhost_user_postcopy_advise()
2056 return -ENOSYS; in vhost_user_postcopy_advise()
2065 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_listen()
2071 u->postcopy_listen = true; in vhost_user_postcopy_listen()
2077 error_setg(errp, "Failed to send postcopy_listen to vhost"); in vhost_user_postcopy_listen()
2100 struct vhost_user *u = dev->opaque; in vhost_user_postcopy_end()
2106 error_setg(errp, "Failed to send postcopy_end to vhost"); in vhost_user_postcopy_end()
2115 postcopy_unregister_shared_ufd(&u->postcopy_fd); in vhost_user_postcopy_end()
2116 close(u->postcopy_fd.fd); in vhost_user_postcopy_end()
2117 u->postcopy_fd.handler = NULL; in vhost_user_postcopy_end()
2130 struct vhost_dev *dev = u->dev; in vhost_user_postcopy_notifier()
2132 switch (pnd->reason) { in vhost_user_postcopy_notifier()
2134 if (!virtio_has_feature(dev->protocol_features, in vhost_user_postcopy_notifier()
2138 "vhost-user backend not capable of postcopy"); in vhost_user_postcopy_notifier()
2139 return -ENOENT; in vhost_user_postcopy_notifier()
2168 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_backend_init()
2171 u->user = vus; in vhost_user_backend_init()
2172 u->dev = dev; in vhost_user_backend_init()
2173 dev->opaque = u; in vhost_user_backend_init()
2177 error_setg_errno(errp, -err, "vhost_backend_init failed"); in vhost_user_backend_init()
2182 bool supports_f_config = vus->supports_config || in vhost_user_backend_init()
2183 (dev->config_ops && dev->config_ops->vhost_dev_config_notifier); in vhost_user_backend_init()
2186 dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; in vhost_user_backend_init()
2192 return -EPROTO; in vhost_user_backend_init()
2196 * We will use all the protocol features we support - although in vhost_user_backend_init()
2205 error_setg(errp, "vhost-user device expecting " in vhost_user_backend_init()
2206 "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does " in vhost_user_backend_init()
2208 return -EPROTO; in vhost_user_backend_init()
2213 warn_report("vhost-user backend supports " in vhost_user_backend_init()
2220 dev->protocol_features = protocol_features; in vhost_user_backend_init()
2221 err = vhost_user_set_protocol_features(dev, dev->protocol_features); in vhost_user_backend_init()
2224 return -EPROTO; in vhost_user_backend_init()
2228 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) { in vhost_user_backend_init()
2230 &dev->max_queues); in vhost_user_backend_init()
2233 return -EPROTO; in vhost_user_backend_init()
2236 dev->max_queues = 1; in vhost_user_backend_init()
2239 if (dev->num_queues && dev->max_queues < dev->num_queues) { in vhost_user_backend_init()
2241 "backend is %" PRIu64, dev->max_queues); in vhost_user_backend_init()
2242 return -EINVAL; in vhost_user_backend_init()
2246 !(virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2248 virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2250 error_setg(errp, "IOMMU support requires reply-ack and " in vhost_user_backend_init()
2251 "backend-req protocol features."); in vhost_user_backend_init()
2252 return -EINVAL; in vhost_user_backend_init()
2256 if (!virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2258 u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS; in vhost_user_backend_init()
2263 return -EPROTO; in vhost_user_backend_init()
2266 if (ram_slots < u->user->memory_slots) { in vhost_user_backend_init()
2270 u->user->memory_slots); in vhost_user_backend_init()
2271 return -EINVAL; in vhost_user_backend_init()
2274 u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS); in vhost_user_backend_init()
2278 if (dev->migration_blocker == NULL && in vhost_user_backend_init()
2279 !virtio_has_feature(dev->protocol_features, in vhost_user_backend_init()
2281 error_setg(&dev->migration_blocker, in vhost_user_backend_init()
2282 "Migration disabled: vhost-user backend lacks " in vhost_user_backend_init()
2286 if (dev->vq_index == 0) { in vhost_user_backend_init()
2290 return -EPROTO; in vhost_user_backend_init()
2294 u->postcopy_notifier.notify = vhost_user_postcopy_notifier; in vhost_user_backend_init()
2295 postcopy_add_notifier(&u->postcopy_notifier); in vhost_user_backend_init()
2304 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_backend_cleanup()
2306 u = dev->opaque; in vhost_user_backend_cleanup()
2307 if (u->postcopy_notifier.notify) { in vhost_user_backend_cleanup()
2308 postcopy_remove_notifier(&u->postcopy_notifier); in vhost_user_backend_cleanup()
2309 u->postcopy_notifier.notify = NULL; in vhost_user_backend_cleanup()
2311 u->postcopy_listen = false; in vhost_user_backend_cleanup()
2312 if (u->postcopy_fd.handler) { in vhost_user_backend_cleanup()
2313 postcopy_unregister_shared_ufd(&u->postcopy_fd); in vhost_user_backend_cleanup()
2314 close(u->postcopy_fd.fd); in vhost_user_backend_cleanup()
2315 u->postcopy_fd.handler = NULL; in vhost_user_backend_cleanup()
2317 if (u->backend_ioc) { in vhost_user_backend_cleanup()
2320 g_free(u->region_rb); in vhost_user_backend_cleanup()
2321 u->region_rb = NULL; in vhost_user_backend_cleanup()
2322 g_free(u->region_rb_offset); in vhost_user_backend_cleanup()
2323 u->region_rb_offset = NULL; in vhost_user_backend_cleanup()
2324 u->region_rb_len = 0; in vhost_user_backend_cleanup()
2326 dev->opaque = 0; in vhost_user_backend_cleanup()
2333 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); in vhost_user_get_vq_index()
2340 struct vhost_user *u = dev->opaque; in vhost_user_memslots_limit()
2342 return u->user->memory_slots; in vhost_user_memslots_limit()
2347 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_requires_shm_log()
2349 return virtio_has_feature(dev->protocol_features, in vhost_user_requires_shm_log()
2357 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_migration_done()
2360 if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) { in vhost_user_migration_done()
2365 if (virtio_has_feature(dev->protocol_features, in vhost_user_migration_done()
2374 return -ENOTSUP; in vhost_user_migration_done()
2380 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_net_set_mtu()
2384 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) { in vhost_user_net_set_mtu()
2431 /* No-op as the receive channel is not dedicated to IOTLB messages. */ in vhost_user_set_iotlb_callback()
2444 if (!virtio_has_feature(dev->protocol_features, in vhost_user_get_config()
2447 return -EINVAL; in vhost_user_get_config()
2456 error_setg_errno(errp, -ret, "vhost_get_config failed"); in vhost_user_get_config()
2462 error_setg_errno(errp, -ret, "vhost_get_config failed"); in vhost_user_get_config()
2470 return -EPROTO; in vhost_user_get_config()
2475 return -EPROTO; in vhost_user_get_config()
2488 bool reply_supported = virtio_has_feature(dev->protocol_features, in vhost_user_set_config()
2497 if (!virtio_has_feature(dev->protocol_features, in vhost_user_set_config()
2499 return -ENOTSUP; in vhost_user_set_config()
2507 return -EINVAL; in vhost_user_set_config()
2533 bool crypto_session = virtio_has_feature(dev->protocol_features, in vhost_user_crypto_create_session()
2542 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); in vhost_user_crypto_create_session()
2545 error_report("vhost-user trying to send unhandled ioctl"); in vhost_user_crypto_create_session()
2546 return -ENOTSUP; in vhost_user_crypto_create_session()
2549 if (backend_info->op_code == VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION) { in vhost_user_crypto_create_session()
2550 CryptoDevBackendAsymSessionInfo *sess = &backend_info->u.asym_sess_info; in vhost_user_crypto_create_session()
2555 if (sess->keylen) { in vhost_user_crypto_create_session()
2557 if (sess->keylen > keylen) { in vhost_user_crypto_create_session()
2559 return -ENOTSUP; in vhost_user_crypto_create_session()
2562 memcpy(&msg.payload.session.u.asym.key, sess->key, in vhost_user_crypto_create_session()
2563 sess->keylen); in vhost_user_crypto_create_session()
2566 CryptoDevBackendSymSessionInfo *sess = &backend_info->u.sym_sess_info; in vhost_user_crypto_create_session()
2571 if (sess->key_len) { in vhost_user_crypto_create_session()
2573 if (sess->key_len > keylen) { in vhost_user_crypto_create_session()
2575 return -ENOTSUP; in vhost_user_crypto_create_session()
2578 memcpy(&msg.payload.session.u.sym.key, sess->cipher_key, in vhost_user_crypto_create_session()
2579 sess->key_len); in vhost_user_crypto_create_session()
2582 if (sess->auth_key_len > 0) { in vhost_user_crypto_create_session()
2584 if (sess->auth_key_len > keylen) { in vhost_user_crypto_create_session()
2586 return -ENOTSUP; in vhost_user_crypto_create_session()
2589 memcpy(&msg.payload.session.u.sym.auth_key, sess->auth_key, in vhost_user_crypto_create_session()
2590 sess->auth_key_len); in vhost_user_crypto_create_session()
2594 msg.payload.session.op_code = backend_info->op_code; in vhost_user_crypto_create_session()
2595 msg.payload.session.session_id = backend_info->session_id; in vhost_user_crypto_create_session()
2613 return -EPROTO; in vhost_user_crypto_create_session()
2618 return -EPROTO; in vhost_user_crypto_create_session()
2624 return -EINVAL; in vhost_user_crypto_create_session()
2635 bool crypto_session = virtio_has_feature(dev->protocol_features, in vhost_user_crypto_close_session()
2645 error_report("vhost-user trying to send unhandled ioctl"); in vhost_user_crypto_close_session()
2646 return -ENOTSUP; in vhost_user_crypto_close_session()
2671 struct vhost_user *u = dev->opaque; in vhost_user_get_inflight_fd()
2672 CharBackend *chr = u->user->chr; in vhost_user_get_inflight_fd()
2676 .payload.inflight.num_queues = dev->nvqs, in vhost_user_get_inflight_fd()
2681 if (!virtio_has_feature(dev->protocol_features, in vhost_user_get_inflight_fd()
2700 return -EPROTO; in vhost_user_get_inflight_fd()
2705 return -EPROTO; in vhost_user_get_inflight_fd()
2715 return -EIO; in vhost_user_get_inflight_fd()
2724 return -EFAULT; in vhost_user_get_inflight_fd()
2727 inflight->addr = addr; in vhost_user_get_inflight_fd()
2728 inflight->fd = fd; in vhost_user_get_inflight_fd()
2729 inflight->size = msg.payload.inflight.mmap_size; in vhost_user_get_inflight_fd()
2730 inflight->offset = msg.payload.inflight.mmap_offset; in vhost_user_get_inflight_fd()
2731 inflight->queue_size = queue_size; in vhost_user_get_inflight_fd()
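A sketch of the final step of GET_INFLIGHT_FD: the backend returns an fd plus (mmap_size, mmap_offset), QEMU maps that window, and records fd/size/offset so the same region can be handed back unchanged via SET_INFLIGHT_FD after a reconnect. Field names follow the fragments above; the function name is illustrative.

#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>

static void *demo_map_inflight(int fd, uint64_t mmap_size,
                               uint64_t mmap_offset)
{
    void *addr = mmap(NULL, (size_t)mmap_size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, (off_t)mmap_offset);
    return addr == MAP_FAILED ? NULL : addr;   /* NULL maps to -EFAULT */
}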
2742 .payload.inflight.mmap_size = inflight->size, in vhost_user_set_inflight_fd()
2743 .payload.inflight.mmap_offset = inflight->offset, in vhost_user_set_inflight_fd()
2744 .payload.inflight.num_queues = dev->nvqs, in vhost_user_set_inflight_fd()
2745 .payload.inflight.queue_size = inflight->queue_size, in vhost_user_set_inflight_fd()
2749 if (!virtio_has_feature(dev->protocol_features, in vhost_user_set_inflight_fd()
2754 return vhost_user_write(dev, &msg, &inflight->fd, 1); in vhost_user_set_inflight_fd()
2765 if (user->chr) { in vhost_user_init()
2766 error_setg(errp, "Cannot initialize vhost-user state"); in vhost_user_init()
2769 user->chr = chr; in vhost_user_init()
2770 user->memory_slots = 0; in vhost_user_init()
2771 user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4, in vhost_user_init()
2778 if (!user->chr) { in vhost_user_cleanup()
2781 user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true); in vhost_user_cleanup()
2782 user->chr = NULL; in vhost_user_cleanup()
2790 struct vhost_dev *vhost; member
2797 data->cb(data->dev); in vhost_user_async_close_bh()
2804 * we want to keep all the in-flight data as is for migration
2808 CharBackend *chardev, struct vhost_dev *vhost, in vhost_user_async_close() argument
2813 * A close event may happen during a read/write, but vhost in vhost_user_async_close()
2821 data->cb = cb; in vhost_user_async_close()
2822 data->dev = d; in vhost_user_async_close()
2823 data->cd = chardev; in vhost_user_async_close()
2824 data->vhost = vhost; in vhost_user_async_close()
2826 /* Disable any further notifications on the chardev */ in vhost_user_async_close()
2834 * Move vhost device to the stopped state. The vhost-user device in vhost_user_async_close()
2836 * the vhost migration code. If disconnect was caught there is an in vhost_user_async_close()
2837 * option for the general vhost code to get the dev state without in vhost_user_async_close()
2838 * knowing its type (in this case vhost-user). in vhost_user_async_close()
2840 * Note if the vhost device is fully cleared by the time we in vhost_user_async_close()
2843 vhost->started = false; in vhost_user_async_close()
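The fragments above defer the actual teardown to a bottom half so it never runs inside the chardev read/write path. A sketch of the scheduling step, assuming a payload struct mirroring the DevData fields visible in the fragments (the exact layout here is illustrative):

typedef struct DemoCloseData {
    void (*cb)(struct vhost_dev *dev);  /* completion callback */
    CharBackend *cd;                    /* chardev to tear down */
    struct vhost_dev *vhost;            /* device being closed */
} DemoCloseData;

static void demo_async_close_bh(void *opaque)
{
    DemoCloseData *data = opaque;

    data->cb(data->vhost);   /* runs from the main loop, not the chardev */
    g_free(data);
}

/* Called from the disconnect handler: record state, mark the device
 * stopped, and let the bottom half do the rest later. */
static void demo_schedule_async_close(DemoCloseData *data)
{
    data->vhost->started = false;
    aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                            demo_async_close_bh, data);
}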
2849 if (!virtio_has_feature(dev->protocol_features, in vhost_user_dev_start()
2855 if (dev->vq_index + dev->nvqs != dev->vq_index_end) { in vhost_user_dev_start()
2871 if (dev->vq_index + dev->nvqs != dev->vq_index_end) { in vhost_user_reset_status()
2875 if (virtio_has_feature(dev->protocol_features, in vhost_user_reset_status()
2883 return virtio_has_feature(dev->protocol_features, in vhost_user_supports_device_state()
2895 struct vhost_user *vu = dev->opaque; in vhost_user_set_device_state_fd()
2908 *reply_fd = -1; in vhost_user_set_device_state_fd()
2912 error_setg(errp, "Back-end does not support migration state transfer"); in vhost_user_set_device_state_fd()
2913 return -ENOTSUP; in vhost_user_set_device_state_fd()
2919 error_setg_errno(errp, -ret, in vhost_user_set_device_state_fd()
2926 error_setg_errno(errp, -ret, in vhost_user_set_device_state_fd()
2935 return -EPROTO; in vhost_user_set_device_state_fd()
2942 return -EPROTO; in vhost_user_set_device_state_fd()
2946 error_setg(errp, "Back-end did not accept migration state transfer"); in vhost_user_set_device_state_fd()
2947 return -EIO; in vhost_user_set_device_state_fd()
2951 *reply_fd = qemu_chr_fe_get_msgfd(vu->user->chr); in vhost_user_set_device_state_fd()
2954 "Failed to get back-end-provided transfer pipe FD"); in vhost_user_set_device_state_fd()
2955 *reply_fd = -1; in vhost_user_set_device_state_fd()
2956 return -EIO; in vhost_user_set_device_state_fd()
2975 error_setg(errp, "Back-end does not support migration state transfer"); in vhost_user_check_device_state()
2976 return -ENOTSUP; in vhost_user_check_device_state()
2981 error_setg_errno(errp, -ret, in vhost_user_check_device_state()
2988 error_setg_errno(errp, -ret, in vhost_user_check_device_state()
2997 return -EPROTO; in vhost_user_check_device_state()
3004 return -EPROTO; in vhost_user_check_device_state()
3008 error_setg(errp, "Back-end failed to process its internal state"); in vhost_user_check_device_state()
3009 return -EIO; in vhost_user_check_device_state()