Lines Matching "--enable-trace-backends" (full-text query tokens: +full:- +full:- +full:enable +full:- +full:trace +full:- +full:backends)
10 * the COPYING file in the top-level directory.
12 * Contributions after 2012-01-13 are licensed under the terms of the
21 #include "qemu/error-report.h"
24 #include "standard-headers/linux/vhost_types.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/mem/memory-device.h"
28 #include "migration/qemu-file-types.h"
30 #include "trace.h"
39 strerror(-retval), -retval); \
59 max = MIN(max, hdev->vhost_ops->vhost_backend_memslots_limit(hdev)); in vhost_get_max_memslots()
70 unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev); in vhost_get_free_memslots()
71 unsigned int cur_free = r - hdev->mem->nregions; in vhost_get_free_memslots()
73 if (unlikely(r < hdev->mem->nregions)) { in vhost_get_free_memslots()
75 " the device limit (%u).", hdev->mem->nregions, r); in vhost_get_free_memslots()
89 vhost_log_chunk_t *dev_log = dev->log->log; in vhost_dev_sync_region()
100 assert(end / VHOST_LOG_CHUNK < dev->log_size); in vhost_dev_sync_region()
101 assert(start / VHOST_LOG_CHUNK < dev->log_size); in vhost_dev_sync_region()
105 /* We first check with non-atomic: much cheaper, in vhost_dev_sync_region()
106 * and we expect non-dirty to be the common case. */ in vhost_dev_sync_region()
120 section_offset = page_addr - section->offset_within_address_space; in vhost_dev_sync_region()
121 mr_offset = section_offset + section->offset_within_region; in vhost_dev_sync_region()
122 memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE); in vhost_dev_sync_region()
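The vhost_dev_sync_region() lines above show the dirty-log scan: the log is an array of chunk-sized words, each word is atomically fetched and cleared, and every set bit is translated back into a dirty guest page. A minimal standalone sketch of that shape, assuming a 4 KiB page, 64 pages per log word, a GCC/Clang __builtin_ctzll, and a hypothetical mark_page_dirty() callback (none of these names are QEMU's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_PAGE   0x1000ULL                 /* assumed page granularity */
#define LOG_BITS   (8 * sizeof(uint64_t))
#define LOG_CHUNK  (LOG_PAGE * LOG_BITS)     /* bytes covered by one log word */

static void mark_page_dirty(uint64_t page_addr)
{
    printf("dirty page at 0x%llx\n", (unsigned long long)page_addr);
}

static void sync_dirty_range(_Atomic uint64_t *log, uint64_t start, uint64_t end)
{
    for (uint64_t addr = start; addr < end; addr += LOG_CHUNK) {
        _Atomic uint64_t *word = log + addr / LOG_CHUNK;

        /* Relaxed peek first: clean chunks are the common case. */
        if (atomic_load_explicit(word, memory_order_relaxed) == 0) {
            continue;
        }
        /* Snapshot and clear the chunk atomically, then walk its set bits. */
        uint64_t bits = atomic_exchange(word, 0);
        while (bits) {
            int bit = __builtin_ctzll(bits);
            bits &= bits - 1;
            mark_page_dirty((addr / LOG_CHUNK) * LOG_CHUNK + bit * LOG_PAGE);
        }
    }
}

int main(void)
{
    static _Atomic uint64_t log[4];
    atomic_store(&log[0], 1ULL << 3);        /* pretend page 3 is dirty */
    sync_dirty_range(log, 0, 2 * LOG_CHUNK);
    return 0;
}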
131 VirtIODevice *vdev = dev->vdev; in vhost_dev_has_iommu()
136 * does not have IOMMU, there's no need to enable this feature in vhost_dev_has_iommu()
149 assert(dev->vhost_ops); in vhost_dev_should_log()
150 assert(dev->vhost_ops->backend_type > VHOST_BACKEND_TYPE_NONE); in vhost_dev_should_log()
151 assert(dev->vhost_ops->backend_type < VHOST_BACKEND_TYPE_MAX); in vhost_dev_should_log()
153 return dev == QLIST_FIRST(&vhost_log_devs[dev->vhost_ops->backend_type]); in vhost_dev_should_log()
160 assert(hdev->vhost_ops); in vhost_dev_elect_mem_logger()
162 backend_type = hdev->vhost_ops->backend_type; in vhost_dev_elect_mem_logger()
197 if (!dev->log_enabled || !dev->started) { in vhost_sync_dirty_bitmap()
200 start_addr = section->offset_within_address_space; in vhost_sync_dirty_bitmap()
201 end_addr = range_get_last(start_addr, int128_get64(section->size)); in vhost_sync_dirty_bitmap()
206 for (i = 0; i < dev->mem->nregions; ++i) { in vhost_sync_dirty_bitmap()
207 struct vhost_memory_region *reg = dev->mem->regions + i; in vhost_sync_dirty_bitmap()
209 reg->guest_phys_addr, in vhost_sync_dirty_bitmap()
210 range_get_last(reg->guest_phys_addr, in vhost_sync_dirty_bitmap()
211 reg->memory_size)); in vhost_sync_dirty_bitmap()
214 for (i = 0; i < dev->nvqs; ++i) { in vhost_sync_dirty_bitmap()
215 struct vhost_virtqueue *vq = dev->vqs + i; in vhost_sync_dirty_bitmap()
217 if (!vq->used_phys && !vq->used_size) { in vhost_sync_dirty_bitmap()
223 hwaddr used_phys = vq->used_phys, used_size = vq->used_size; in vhost_sync_dirty_bitmap()
228 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as, in vhost_sync_dirty_bitmap()
238 return -EINVAL; in vhost_sync_dirty_bitmap()
248 s = iotlb.addr_mask - offset; in vhost_sync_dirty_bitmap()
254 s = MIN(s, used_size - 1) + 1; in vhost_sync_dirty_bitmap()
258 used_size -= s; in vhost_sync_dirty_bitmap()
263 end_addr, vq->used_phys, in vhost_sync_dirty_bitmap()
264 range_get_last(vq->used_phys, vq->used_size)); in vhost_sync_dirty_bitmap()
283 for (i = 0; i < dev->n_mem_sections; ++i) { in vhost_log_sync_range()
284 MemoryRegionSection *section = &dev->mem_sections[i]; in vhost_log_sync_range()
293 for (i = 0; i < dev->mem->nregions; ++i) { in vhost_get_log_size()
294 struct vhost_memory_region *reg = dev->mem->regions + i; in vhost_get_log_size()
295 uint64_t last = range_get_last(reg->guest_phys_addr, in vhost_get_log_size()
296 reg->memory_size); in vhost_get_log_size()
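vhost_get_log_size() above derives the log size from the highest guest-physical byte any region covers, in chunk-sized units. A sketch of that computation under an assumed chunk size and a simplified region struct:

#include <stdint.h>
#include <stddef.h>

#define LOG_CHUNK 0x40000ULL   /* assumed bytes tracked per log word */

struct mem_region {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
};

static uint64_t log_size_for(const struct mem_region *regions, size_t n)
{
    uint64_t size = 0;

    for (size_t i = 0; i < n; i++) {
        /* Last byte covered by this region. */
        uint64_t last = regions[i].guest_phys_addr + regions[i].memory_size - 1;
        uint64_t chunks = last / LOG_CHUNK + 1;

        if (chunks > size) {
            size = chunks;
        }
    }
    return size;   /* number of log words needed */
}

int main(void)
{
    struct mem_region regions[] = {
        { 0x0,            0x80000000ULL },   /* 2 GiB at GPA 0 */
        { 0x100000000ULL, 0x40000000ULL },   /* 1 GiB above 4 GiB */
    };
    return log_size_for(regions, 2) ? 0 : 1;
}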
310 dev->vhost_ops = &kernel_ops; in vhost_set_backend_type()
315 dev->vhost_ops = &user_ops; in vhost_set_backend_type()
320 dev->vhost_ops = &vdpa_ops; in vhost_set_backend_type()
325 r = -1; in vhost_set_backend_type()
329 assert(dev->vhost_ops->backend_type == backend_type); in vhost_set_backend_type()
339 uint64_t logsize = size * sizeof(*(log->log)); in vhost_log_alloc()
340 int fd = -1; in vhost_log_alloc()
344 log->log = qemu_memfd_alloc("vhost-log", logsize, in vhost_log_alloc()
352 memset(log->log, 0, logsize); in vhost_log_alloc()
354 log->log = g_malloc0(logsize); in vhost_log_alloc()
357 log->size = size; in vhost_log_alloc()
358 log->refcnt = 1; in vhost_log_alloc()
359 log->fd = fd; in vhost_log_alloc()
374 if (!log || log->size != size) { in vhost_log_get()
382 ++log->refcnt; in vhost_log_get()
390 struct vhost_log *log = dev->log; in vhost_log_put()
397 assert(dev->vhost_ops); in vhost_log_put()
398 backend_type = dev->vhost_ops->backend_type; in vhost_log_put()
405 --log->refcnt; in vhost_log_put()
406 if (log->refcnt == 0) { in vhost_log_put()
408 if (dev->log_size && sync) { in vhost_log_put()
409 vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1); in vhost_log_put()
413 g_free(log->log); in vhost_log_put()
416 qemu_memfd_free(log->log, log->size * sizeof(*(log->log)), in vhost_log_put()
417 log->fd); in vhost_log_put()
425 dev->log = NULL; in vhost_log_put()
426 dev->log_size = 0; in vhost_log_put()
431 return dev->vhost_ops->vhost_requires_shm_log && in vhost_dev_log_is_shared()
432 dev->vhost_ops->vhost_requires_shm_log(dev); in vhost_dev_log_is_shared()
437 struct vhost_log *log = vhost_log_get(dev->vhost_ops->backend_type, in vhost_dev_log_resize()
439 uint64_t log_base = (uintptr_t)log->log; in vhost_dev_log_resize()
444 r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log); in vhost_dev_log_resize()
450 dev->log = log; in vhost_dev_log_resize()
451 dev->log_size = size; in vhost_dev_log_resize()
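The vhost_log_get()/vhost_log_put()/vhost_dev_log_resize() lines hint at a refcounted log that is shared while its size matches and released by the last user. A rough model under those assumptions (a single cached slot and plain calloc instead of the optional memfd-backed allocation seen at line 344):

#include <stdint.h>
#include <stdlib.h>

struct log_buf {
    uint64_t *words;
    uint64_t size;      /* in words */
    int refcnt;
};

static struct log_buf *shared_log;   /* assumed one cached log */

static struct log_buf *log_get(uint64_t size)
{
    if (!shared_log || shared_log->size != size) {
        /* Size mismatch (or no log yet): allocate a fresh one and cache it. */
        struct log_buf *l = calloc(1, sizeof(*l));
        l->words = calloc(size, sizeof(*l->words));
        l->size = size;
        l->refcnt = 1;
        shared_log = l;
        return l;
    }
    shared_log->refcnt++;
    return shared_log;
}

static void log_put(struct log_buf *l)
{
    if (--l->refcnt == 0) {
        if (l == shared_log) {
            shared_log = NULL;
        }
        free(l->words);
        free(l);
    }
}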
489 return -ENOMEM; in vhost_verify_ring_part_mapping()
492 hva_ring_offset = ring_gpa - reg_gpa; in vhost_verify_ring_part_mapping()
494 return -EBUSY; in vhost_verify_ring_part_mapping()
517 for (i = 0; i < dev->nvqs; ++i) { in vhost_verify_ring_mappings()
518 struct vhost_virtqueue *vq = dev->vqs + i; in vhost_verify_ring_mappings()
520 if (vq->desc_phys == 0) { in vhost_verify_ring_mappings()
526 vq->desc, vq->desc_phys, vq->desc_size, in vhost_verify_ring_mappings()
534 vq->avail, vq->avail_phys, vq->avail_size, in vhost_verify_ring_mappings()
542 vq->used, vq->used_phys, vq->used_size, in vhost_verify_ring_mappings()
549 if (r == -ENOMEM) { in vhost_verify_ring_mappings()
551 } else if (r == -EBUSY) { in vhost_verify_ring_mappings()
565 MemoryRegion *mr = section->mr; in vhost_section()
573 * dirty-tracking other than migration for which it has in vhost_section()
576 * self-modifying code detection flags. However a vhost-user in vhost_section()
577 * client could still confuse a TCG guest if it re-writes in vhost_section()
584 trace_vhost_reject_section(mr->name, 1); in vhost_section()
589 * Some backends (like vhost-user) can only handle memory regions in vhost_section()
595 if (memory_region_get_fd(section->mr) < 0 && in vhost_section()
596 dev->vhost_ops->vhost_backend_no_private_memslots && in vhost_section()
597 dev->vhost_ops->vhost_backend_no_private_memslots(dev)) { in vhost_section()
598 trace_vhost_reject_section(mr->name, 2); in vhost_section()
602 trace_vhost_section(mr->name); in vhost_section()
605 trace_vhost_reject_section(mr->name, 3); in vhost_section()
614 dev->tmp_sections = NULL; in vhost_begin()
615 dev->n_tmp_sections = 0; in vhost_begin()
634 old_sections = dev->mem_sections; in vhost_commit()
635 n_old_sections = dev->n_mem_sections; in vhost_commit()
636 dev->mem_sections = dev->tmp_sections; in vhost_commit()
637 dev->n_mem_sections = dev->n_tmp_sections; in vhost_commit()
639 if (dev->n_mem_sections != n_old_sections) { in vhost_commit()
645 &dev->mem_sections[i])) { in vhost_commit()
652 trace_vhost_commit(dev->started, changed); in vhost_commit()
659 dev->n_mem_sections * sizeof dev->mem->regions[0]; in vhost_commit()
660 dev->mem = g_realloc(dev->mem, regions_size); in vhost_commit()
661 dev->mem->nregions = dev->n_mem_sections; in vhost_commit()
663 for (i = 0; i < dev->n_mem_sections; i++) { in vhost_commit()
664 struct vhost_memory_region *cur_vmr = dev->mem->regions + i; in vhost_commit()
665 struct MemoryRegionSection *mrs = dev->mem_sections + i; in vhost_commit()
667 cur_vmr->guest_phys_addr = mrs->offset_within_address_space; in vhost_commit()
668 cur_vmr->memory_size = int128_get64(mrs->size); in vhost_commit()
669 cur_vmr->userspace_addr = in vhost_commit()
670 (uintptr_t)memory_region_get_ram_ptr(mrs->mr) + in vhost_commit()
671 mrs->offset_within_region; in vhost_commit()
672 cur_vmr->flags_padding = 0; in vhost_commit()
675 if (!dev->started) { in vhost_commit()
679 for (i = 0; i < dev->mem->nregions; i++) { in vhost_commit()
681 (void *)(uintptr_t)dev->mem->regions[i].userspace_addr, in vhost_commit()
682 dev->mem->regions[i].guest_phys_addr, in vhost_commit()
683 dev->mem->regions[i].memory_size)) { in vhost_commit()
689 if (!dev->log_enabled) { in vhost_commit()
690 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); in vhost_commit()
699 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log) in vhost_commit()
701 if (dev->log_size < log_size) { in vhost_commit()
704 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); in vhost_commit()
709 if (dev->log_size > log_size + VHOST_LOG_BUFFER) { in vhost_commit()
718 while (n_old_sections--) { in vhost_commit()
733 uint64_t mrs_size = int128_get64(section->size); in vhost_region_add_section()
734 uint64_t mrs_gpa = section->offset_within_address_space; in vhost_region_add_section()
735 uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) + in vhost_region_add_section()
736 section->offset_within_region; in vhost_region_add_section()
737 RAMBlock *mrs_rb = section->mr->ram_block; in vhost_region_add_section()
739 trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size, in vhost_region_add_section()
742 if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) { in vhost_region_add_section()
746 uint64_t alignage = mrs_host & (mrs_page - 1); in vhost_region_add_section()
748 mrs_host -= alignage; in vhost_region_add_section()
750 mrs_gpa -= alignage; in vhost_region_add_section()
753 alignage = mrs_size & (mrs_page - 1); in vhost_region_add_section()
755 mrs_size += mrs_page - alignage; in vhost_region_add_section()
757 trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, in vhost_region_add_section()
761 if (dev->n_tmp_sections && !section->unmergeable) { in vhost_region_add_section()
767 MemoryRegionSection *prev_sec = dev->tmp_sections + in vhost_region_add_section()
768 (dev->n_tmp_sections - 1); in vhost_region_add_section()
769 uint64_t prev_gpa_start = prev_sec->offset_within_address_space; in vhost_region_add_section()
770 uint64_t prev_size = int128_get64(prev_sec->size); in vhost_region_add_section()
773 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) + in vhost_region_add_section()
774 prev_sec->offset_within_region; in vhost_region_add_section()
778 /* OK, looks like overlapping/intersecting - it's possible that in vhost_region_add_section()
785 __func__, section->mr->name, mrs_gpa, in vhost_region_add_section()
786 prev_sec->mr->name, prev_gpa_start); in vhost_region_add_section()
791 size_t offset = mrs_gpa - prev_gpa_start; in vhost_region_add_section()
794 section->mr == prev_sec->mr && !prev_sec->unmergeable) { in vhost_region_add_section()
797 prev_sec->offset_within_address_space = in vhost_region_add_section()
799 prev_sec->offset_within_region = in vhost_region_add_section()
800 MIN(prev_host_start, mrs_host) - in vhost_region_add_section()
801 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr); in vhost_region_add_section()
802 prev_sec->size = int128_make64(max_end - MIN(prev_host_start, in vhost_region_add_section()
804 trace_vhost_region_add_section_merge(section->mr->name, in vhost_region_add_section()
805 int128_get64(prev_sec->size), in vhost_region_add_section()
806 prev_sec->offset_within_address_space, in vhost_region_add_section()
807 prev_sec->offset_within_region); in vhost_region_add_section()
823 ++dev->n_tmp_sections; in vhost_region_add_section()
824 dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections, in vhost_region_add_section()
825 dev->n_tmp_sections); in vhost_region_add_section()
826 dev->tmp_sections[dev->n_tmp_sections - 1] = *section; in vhost_region_add_section()
830 dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL; in vhost_region_add_section()
831 memory_region_ref(section->mr); in vhost_region_add_section()
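vhost_region_add_section() merges a new RAM section into the previous one when the two are contiguous or overlapping in both guest-physical and host-virtual space and come from the same memory region. A condensed, hypothetical version of that test and merge (field names are stand-ins, and the vhost-user alignment pre-step is omitted):

#include <stdbool.h>
#include <stdint.h>

struct section {
    uint64_t gpa;        /* guest-physical start */
    uint64_t size;
    uintptr_t host;      /* host-virtual start */
    void *mr;            /* owning memory region, compared by pointer */
};

static bool can_merge(const struct section *prev, const struct section *next)
{
    if (prev->mr != next->mr) {
        return false;
    }
    /* next must start at or before the end of prev in guest space... */
    if (next->gpa > prev->gpa + prev->size) {
        return false;
    }
    /* ...and the host mapping must line up at the same offset. */
    uint64_t offset = next->gpa - prev->gpa;
    return next->host == prev->host + offset;
}

static void merge(struct section *prev, const struct section *next)
{
    uint64_t prev_end = prev->gpa + prev->size;
    uint64_t next_end = next->gpa + next->size;

    if (next_end > prev_end) {
        prev->size = next_end - prev->gpa;   /* extend prev to cover next */
    }
}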
851 struct vhost_dev *hdev = iommu->hdev; in vhost_iommu_unmap_notify()
852 hwaddr iova = iotlb->iova + iommu->iommu_offset; in vhost_iommu_unmap_notify()
855 iotlb->addr_mask + 1)) { in vhost_iommu_unmap_notify()
870 if (!memory_region_is_iommu(section->mr)) { in vhost_iommu_region_add()
874 iommu_mr = IOMMU_MEMORY_REGION(section->mr); in vhost_iommu_region_add()
877 end = int128_add(int128_make64(section->offset_within_region), in vhost_iommu_region_add()
878 section->size); in vhost_iommu_region_add()
882 iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify, in vhost_iommu_region_add()
883 dev->vdev->device_iotlb_enabled ? in vhost_iommu_region_add()
886 section->offset_within_region, in vhost_iommu_region_add()
889 iommu->mr = section->mr; in vhost_iommu_region_add()
890 iommu->iommu_offset = section->offset_within_address_space - in vhost_iommu_region_add()
891 section->offset_within_region; in vhost_iommu_region_add()
892 iommu->hdev = dev; in vhost_iommu_region_add()
893 memory_region_register_iommu_notifier(section->mr, &iommu->n, in vhost_iommu_region_add()
895 QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next); in vhost_iommu_region_add()
906 if (!memory_region_is_iommu(section->mr)) { in vhost_iommu_region_del()
910 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) { in vhost_iommu_region_del()
911 if (iommu->mr == section->mr && in vhost_iommu_region_del()
912 iommu->n.start == section->offset_within_region) { in vhost_iommu_region_del()
913 memory_region_unregister_iommu_notifier(iommu->mr, in vhost_iommu_region_del()
914 &iommu->n); in vhost_iommu_region_del()
928 if (vdev->vhost_started) { in vhost_toggle_device_iotlb()
929 dev = vdc->get_vhost(vdev); in vhost_toggle_device_iotlb()
934 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) { in vhost_toggle_device_iotlb()
935 memory_region_unregister_iommu_notifier(iommu->mr, &iommu->n); in vhost_toggle_device_iotlb()
936 iommu->n.notifier_flags = vdev->device_iotlb_enabled ? in vhost_toggle_device_iotlb()
938 memory_region_register_iommu_notifier(iommu->mr, &iommu->n, in vhost_toggle_device_iotlb()
951 if (dev->vhost_ops->vhost_vq_get_addr) { in vhost_virtqueue_set_addr()
952 r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq); in vhost_virtqueue_set_addr()
958 addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc; in vhost_virtqueue_set_addr()
959 addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail; in vhost_virtqueue_set_addr()
960 addr.used_user_addr = (uint64_t)(unsigned long)vq->used; in vhost_virtqueue_set_addr()
963 addr.log_guest_addr = vq->used_phys; in vhost_virtqueue_set_addr()
965 r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr); in vhost_virtqueue_set_addr()
975 uint64_t features = dev->acked_features; in vhost_dev_set_features()
983 if (dev->vhost_ops->vhost_force_iommu) { in vhost_dev_set_features()
984 if (dev->vhost_ops->vhost_force_iommu(dev) == true) { in vhost_dev_set_features()
988 r = dev->vhost_ops->vhost_set_features(dev, features); in vhost_dev_set_features()
993 if (dev->vhost_ops->vhost_set_backend_cap) { in vhost_dev_set_features()
994 r = dev->vhost_ops->vhost_set_backend_cap(dev); in vhost_dev_set_features()
1014 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_set_log()
1015 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); in vhost_dev_set_log()
1016 addr = virtio_queue_get_desc_addr(dev->vdev, idx); in vhost_dev_set_log()
1026 r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, in vhost_dev_set_log()
1043 for (; i >= 0; --i) { in vhost_dev_set_log()
1044 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); in vhost_dev_set_log()
1045 addr = virtio_queue_get_desc_addr(dev->vdev, idx); in vhost_dev_set_log()
1049 vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, in vhost_dev_set_log()
1050 dev->log_enabled); in vhost_dev_set_log()
1052 vhost_dev_set_features(dev, dev->log_enabled); in vhost_dev_set_log()
1057 static int vhost_migration_log(MemoryListener *listener, bool enable) in vhost_migration_log() argument
1062 if (enable == dev->log_enabled) { in vhost_migration_log()
1065 if (!dev->started) { in vhost_migration_log()
1066 dev->log_enabled = enable; in vhost_migration_log()
1071 if (!enable) { in vhost_migration_log()
1086 dev->log_enabled = enable; in vhost_migration_log()
1088 * vhost-user-* devices could change their state during log in vhost_migration_log()
1092 if (!dev->started) { in vhost_migration_log()
1101 dev->log_enabled = false; in vhost_migration_log()
1113 error_setg_errno(errp, -r, "vhost: Failed to start logging"); in vhost_log_global_start()
1145 * cross-endian legacy devices and modern devices. Only legacy devices
1146 * exposed to a bi-endian guest may require the vhost driver to use a
1155 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE; in vhost_needs_vring_endian()
1157 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG; in vhost_needs_vring_endian()
1171 r = dev->vhost_ops->vhost_set_vring_endian(dev, &s); in vhost_virtqueue_set_vring_endian_legacy()
1184 for (i = 0; i < hdev->mem->nregions; i++) { in vhost_memory_region_lookup()
1185 struct vhost_memory_region *reg = hdev->mem->regions + i; in vhost_memory_region_lookup()
1187 if (gpa >= reg->guest_phys_addr && in vhost_memory_region_lookup()
1188 reg->guest_phys_addr + reg->memory_size > gpa) { in vhost_memory_region_lookup()
1189 *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr; in vhost_memory_region_lookup()
1190 *len = reg->guest_phys_addr + reg->memory_size - gpa; in vhost_memory_region_lookup()
1195 return -EFAULT; in vhost_memory_region_lookup()
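vhost_memory_region_lookup() resolves a guest-physical address to a userspace address plus the remaining length within the containing region. An equivalent standalone lookup over a simplified region table:

#include <stdint.h>
#include <stddef.h>
#include <errno.h>

struct vregion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
};

static int region_lookup(const struct vregion *regs, size_t n, uint64_t gpa,
                         uint64_t *uaddr, uint64_t *len)
{
    for (size_t i = 0; i < n; i++) {
        const struct vregion *r = &regs[i];

        if (gpa >= r->guest_phys_addr &&
            gpa < r->guest_phys_addr + r->memory_size) {
            *uaddr = r->userspace_addr + (gpa - r->guest_phys_addr);
            *len = r->guest_phys_addr + r->memory_size - gpa;
            return 0;
        }
    }
    return -EFAULT;   /* no region covers this address */
}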
1202 int ret = -EFAULT; in vhost_device_iotlb_miss()
1208 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as, in vhost_device_iotlb_miss()
1249 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); in vhost_virtqueue_start()
1264 vq->num = state.num = virtio_queue_get_num(vdev, idx); in vhost_virtqueue_start()
1265 r = dev->vhost_ops->vhost_set_vring_num(dev, &state); in vhost_virtqueue_start()
1272 r = dev->vhost_ops->vhost_set_vring_base(dev, &state); in vhost_virtqueue_start()
1287 vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx); in vhost_virtqueue_start()
1288 vq->desc_phys = a; in vhost_virtqueue_start()
1289 vq->desc = vhost_memory_map(dev, a, &l, false); in vhost_virtqueue_start()
1290 if (!vq->desc || l != s) { in vhost_virtqueue_start()
1291 r = -ENOMEM; in vhost_virtqueue_start()
1294 vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx); in vhost_virtqueue_start()
1295 vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx); in vhost_virtqueue_start()
1296 vq->avail = vhost_memory_map(dev, a, &l, false); in vhost_virtqueue_start()
1297 if (!vq->avail || l != s) { in vhost_virtqueue_start()
1298 r = -ENOMEM; in vhost_virtqueue_start()
1301 vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx); in vhost_virtqueue_start()
1302 vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx); in vhost_virtqueue_start()
1303 vq->used = vhost_memory_map(dev, a, &l, true); in vhost_virtqueue_start()
1304 if (!vq->used || l != s) { in vhost_virtqueue_start()
1305 r = -ENOMEM; in vhost_virtqueue_start()
1309 r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled); in vhost_virtqueue_start()
1315 r = dev->vhost_ops->vhost_set_vring_kick(dev, &file); in vhost_virtqueue_start()
1322 event_notifier_test_and_clear(&vq->masked_notifier); in vhost_virtqueue_start()
1327 if (!vdev->use_guest_notifier_mask) { in vhost_virtqueue_start()
1332 if (k->query_guest_notifiers && in vhost_virtqueue_start()
1333 k->query_guest_notifiers(qbus->parent) && in vhost_virtqueue_start()
1335 file.fd = -1; in vhost_virtqueue_start()
1336 r = dev->vhost_ops->vhost_set_vring_call(dev, &file); in vhost_virtqueue_start()
1347 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx), in vhost_virtqueue_start()
1350 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx), in vhost_virtqueue_start()
1353 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx), in vhost_virtqueue_start()
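The vhost_virtqueue_start() fragments map the descriptor, avail and used rings in turn and unwind in reverse order if any mapping fails. The same goto-based pattern in isolation, with stub ring_map()/ring_unmap() helpers standing in for the QEMU memory API (assumptions, not real calls):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-ins for vhost_memory_map()/vhost_memory_unmap(). */
static void *ring_map(uint64_t gpa, size_t len, int is_write)
{
    (void)gpa; (void)is_write;
    return malloc(len);          /* pretend mapping of guest memory */
}

static void ring_unmap(void *ptr, size_t len, int is_write)
{
    (void)len; (void)is_write;
    free(ptr);
}

struct rings { void *desc, *avail, *used; };

static int rings_map(struct rings *r,
                     uint64_t desc_gpa, size_t desc_sz,
                     uint64_t avail_gpa, size_t avail_sz,
                     uint64_t used_gpa, size_t used_sz)
{
    r->desc = ring_map(desc_gpa, desc_sz, 0);
    if (!r->desc) {
        goto fail;
    }
    r->avail = ring_map(avail_gpa, avail_sz, 0);
    if (!r->avail) {
        goto fail_desc;
    }
    r->used = ring_map(used_gpa, used_sz, 1);   /* used ring is written to */
    if (!r->used) {
        goto fail_avail;
    }
    return 0;

fail_avail:
    ring_unmap(r->avail, avail_sz, 0);
fail_desc:
    ring_unmap(r->desc, desc_sz, 0);
fail:
    return -ENOMEM;
}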
1364 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); in do_vhost_virtqueue_stop()
1376 r = dev->vhost_ops->vhost_get_vring_base(dev, &state); in do_vhost_virtqueue_stop()
1393 /* In the cross-endian case, we need to reset the vring endianness to in do_vhost_virtqueue_stop()
1402 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx), in do_vhost_virtqueue_stop()
1404 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx), in do_vhost_virtqueue_stop()
1406 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx), in do_vhost_virtqueue_stop()
1422 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n); in vhost_virtqueue_set_busyloop_timeout()
1429 if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) { in vhost_virtqueue_set_busyloop_timeout()
1430 return -EINVAL; in vhost_virtqueue_set_busyloop_timeout()
1433 r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state); in vhost_virtqueue_set_busyloop_timeout()
1446 struct vhost_dev *dev = vq->dev; in vhost_virtqueue_error_notifier()
1447 int index = vq - dev->vqs; in vhost_virtqueue_error_notifier()
1449 if (event_notifier_test_and_clear(n) && dev->vdev) { in vhost_virtqueue_error_notifier()
1450 VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d", in vhost_virtqueue_error_notifier()
1451 dev->vq_index + index); in vhost_virtqueue_error_notifier()
1458 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n); in vhost_virtqueue_init()
1462 int r = event_notifier_init(&vq->masked_notifier, 0); in vhost_virtqueue_init()
1467 file.fd = event_notifier_get_wfd(&vq->masked_notifier); in vhost_virtqueue_init()
1468 r = dev->vhost_ops->vhost_set_vring_call(dev, &file); in vhost_virtqueue_init()
1474 vq->dev = dev; in vhost_virtqueue_init()
1476 if (dev->vhost_ops->vhost_set_vring_err) { in vhost_virtqueue_init()
1477 r = event_notifier_init(&vq->error_notifier, 0); in vhost_virtqueue_init()
1482 file.fd = event_notifier_get_fd(&vq->error_notifier); in vhost_virtqueue_init()
1483 r = dev->vhost_ops->vhost_set_vring_err(dev, &file); in vhost_virtqueue_init()
1489 event_notifier_set_handler(&vq->error_notifier, in vhost_virtqueue_init()
1496 event_notifier_cleanup(&vq->error_notifier); in vhost_virtqueue_init()
1498 event_notifier_cleanup(&vq->masked_notifier); in vhost_virtqueue_init()
1504 event_notifier_cleanup(&vq->masked_notifier); in vhost_virtqueue_cleanup()
1505 if (vq->dev->vhost_ops->vhost_set_vring_err) { in vhost_virtqueue_cleanup()
1506 event_notifier_set_handler(&vq->error_notifier, NULL); in vhost_virtqueue_cleanup()
1507 event_notifier_cleanup(&vq->error_notifier); in vhost_virtqueue_cleanup()
1519 hdev->vdev = NULL; in vhost_dev_init()
1520 hdev->migration_blocker = NULL; in vhost_dev_init()
1525 r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp); in vhost_dev_init()
1530 r = hdev->vhost_ops->vhost_set_owner(hdev); in vhost_dev_init()
1532 error_setg_errno(errp, -r, "vhost_set_owner failed"); in vhost_dev_init()
1536 r = hdev->vhost_ops->vhost_get_features(hdev, &features); in vhost_dev_init()
1538 error_setg_errno(errp, -r, "vhost_get_features failed"); in vhost_dev_init()
1542 limit = hdev->vhost_ops->vhost_backend_memslots_limit(hdev); in vhost_dev_init()
1545 error_setg(errp, "some memory device (like virtio-mem)" in vhost_dev_init()
1551 r = -EINVAL; in vhost_dev_init()
1555 for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) { in vhost_dev_init()
1556 r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i); in vhost_dev_init()
1558 error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i); in vhost_dev_init()
1564 for (i = 0; i < hdev->nvqs; ++i) { in vhost_dev_init()
1565 r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, in vhost_dev_init()
1568 error_setg_errno(errp, -r, "Failed to set busyloop timeout"); in vhost_dev_init()
1574 hdev->features = features; in vhost_dev_init()
1576 hdev->memory_listener = (MemoryListener) { in vhost_dev_init()
1590 hdev->iommu_listener = (MemoryListener) { in vhost_dev_init()
1591 .name = "vhost-iommu", in vhost_dev_init()
1596 if (hdev->migration_blocker == NULL) { in vhost_dev_init()
1597 if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { in vhost_dev_init()
1598 error_setg(&hdev->migration_blocker, in vhost_dev_init()
1601 error_setg(&hdev->migration_blocker, in vhost_dev_init()
1606 if (hdev->migration_blocker != NULL) { in vhost_dev_init()
1607 r = migrate_add_blocker_normal(&hdev->migration_blocker, errp); in vhost_dev_init()
1613 hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); in vhost_dev_init()
1614 hdev->n_mem_sections = 0; in vhost_dev_init()
1615 hdev->mem_sections = NULL; in vhost_dev_init()
1616 hdev->log = NULL; in vhost_dev_init()
1617 hdev->log_size = 0; in vhost_dev_init()
1618 hdev->log_enabled = false; in vhost_dev_init()
1619 hdev->started = false; in vhost_dev_init()
1620 memory_listener_register(&hdev->memory_listener, &address_space_memory); in vhost_dev_init()
1627 used = hdev->mem->nregions; in vhost_dev_init()
1632 * memslot would be ROM. If ever relevant, we can optimize for that -- in vhost_dev_init()
1640 r = -EINVAL; in vhost_dev_init()
1648 while (--i >= 0) { in vhost_dev_init()
1649 vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0); in vhost_dev_init()
1653 hdev->nvqs = n_initialized_vqs; in vhost_dev_init()
1664 for (i = 0; i < hdev->nvqs; ++i) { in vhost_dev_cleanup()
1665 vhost_virtqueue_cleanup(hdev->vqs + i); in vhost_dev_cleanup()
1667 if (hdev->mem) { in vhost_dev_cleanup()
1669 memory_listener_unregister(&hdev->memory_listener); in vhost_dev_cleanup()
1672 migrate_del_blocker(&hdev->migration_blocker); in vhost_dev_cleanup()
1673 g_free(hdev->mem); in vhost_dev_cleanup()
1674 g_free(hdev->mem_sections); in vhost_dev_cleanup()
1675 if (hdev->vhost_ops) { in vhost_dev_cleanup()
1676 hdev->vhost_ops->vhost_backend_cleanup(hdev); in vhost_dev_cleanup()
1678 assert(!hdev->log); in vhost_dev_cleanup()
1697 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, in vhost_dev_disable_notifiers_nvqs()
1700 error_report("vhost VQ %d notifier cleanup failed: %d", i, -r); in vhost_dev_disable_notifiers_nvqs()
1712 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i); in vhost_dev_disable_notifiers_nvqs()
1740 for (i = 0; i < hdev->nvqs; ++i) { in vhost_dev_enable_notifiers()
1741 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, in vhost_dev_enable_notifiers()
1744 error_report("vhost VQ %d notifier binding failed: %d", i, -r); in vhost_dev_enable_notifiers()
1763 vhost_dev_disable_notifiers_nvqs(hdev, vdev, hdev->nvqs); in vhost_dev_disable_notifiers()
1771 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index; in vhost_virtqueue_pending()
1772 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs); in vhost_virtqueue_pending()
1773 return event_notifier_test_and_clear(&vq->masked_notifier); in vhost_virtqueue_pending()
1781 int r, index = n - hdev->vq_index; in vhost_virtqueue_mask()
1785 assert(hdev->vhost_ops); in vhost_virtqueue_mask()
1788 assert(vdev->use_guest_notifier_mask); in vhost_virtqueue_mask()
1789 file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier); in vhost_virtqueue_mask()
1794 file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n); in vhost_virtqueue_mask()
1795 r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file); in vhost_virtqueue_mask()
1797 error_report("vhost_set_vring_call failed %d", -r); in vhost_virtqueue_mask()
1803 assert(hdev->vhost_ops); in vhost_config_pending()
1804 if ((hdev->started == false) || in vhost_config_pending()
1805 (hdev->vhost_ops->vhost_set_config_call == NULL)) { in vhost_config_pending()
1810 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; in vhost_config_pending()
1819 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; in vhost_config_mask()
1820 EventNotifier *config_notifier = &vdev->config_notifier; in vhost_config_mask()
1821 assert(hdev->vhost_ops); in vhost_config_mask()
1823 if ((hdev->started == false) || in vhost_config_mask()
1824 (hdev->vhost_ops->vhost_set_config_call == NULL)) { in vhost_config_mask()
1828 assert(vdev->use_guest_notifier_mask); in vhost_config_mask()
1833 r = hdev->vhost_ops->vhost_set_config_call(hdev, fd); in vhost_config_mask()
1835 error_report("vhost_set_config_call failed %d", -r); in vhost_config_mask()
1841 int fd = -1; in vhost_stop_config_intr()
1842 assert(dev->vhost_ops); in vhost_stop_config_intr()
1843 if (dev->vhost_ops->vhost_set_config_call) { in vhost_stop_config_intr()
1844 dev->vhost_ops->vhost_set_config_call(dev, fd); in vhost_stop_config_intr()
1852 assert(dev->vhost_ops); in vhost_start_config_intr()
1853 int fd = event_notifier_get_fd(&dev->vdev->config_notifier); in vhost_start_config_intr()
1854 if (dev->vhost_ops->vhost_set_config_call) { in vhost_start_config_intr()
1855 r = dev->vhost_ops->vhost_set_config_call(dev, fd); in vhost_start_config_intr()
1857 event_notifier_set(&dev->vdev->config_notifier); in vhost_start_config_intr()
1868 if (!(hdev->features & bit_mask)) { in vhost_get_features()
1883 hdev->acked_features |= bit_mask; in vhost_ack_features()
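vhost_get_features()/vhost_ack_features() in the listing boil down to masking requested feature bits against what the backend advertised. Illustrative helpers, assuming plain 64-bit feature masks rather than QEMU's feature-bit tables:

#include <stdint.h>

static uint64_t filter_features(uint64_t backend_features, uint64_t requested)
{
    /* Offer only what both the device model and the backend support. */
    return requested & backend_features;
}

static void ack_feature(uint64_t *acked, uint64_t backend_features, unsigned bit)
{
    uint64_t mask = 1ULL << bit;

    if (backend_features & mask) {
        *acked |= mask;          /* record the negotiated feature bit */
    }
}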
1892 assert(hdev->vhost_ops); in vhost_dev_get_config()
1894 if (hdev->vhost_ops->vhost_get_config) { in vhost_dev_get_config()
1895 return hdev->vhost_ops->vhost_get_config(hdev, config, config_len, in vhost_dev_get_config()
1900 return -ENOSYS; in vhost_dev_get_config()
1906 assert(hdev->vhost_ops); in vhost_dev_set_config()
1908 if (hdev->vhost_ops->vhost_set_config) { in vhost_dev_set_config()
1909 return hdev->vhost_ops->vhost_set_config(hdev, data, offset, in vhost_dev_set_config()
1913 return -ENOSYS; in vhost_dev_set_config()
1919 hdev->config_ops = ops; in vhost_dev_set_config_notifier()
1924 if (inflight && inflight->addr) { in vhost_dev_free_inflight()
1925 qemu_memfd_free(inflight->addr, inflight->size, inflight->fd); in vhost_dev_free_inflight()
1926 inflight->addr = NULL; in vhost_dev_free_inflight()
1927 inflight->fd = -1; in vhost_dev_free_inflight()
1935 if (hdev->vhost_ops->vhost_get_inflight_fd == NULL || in vhost_dev_prepare_inflight()
1936 hdev->vhost_ops->vhost_set_inflight_fd == NULL) { in vhost_dev_prepare_inflight()
1940 hdev->vdev = vdev; in vhost_dev_prepare_inflight()
1942 r = vhost_dev_set_features(hdev, hdev->log_enabled); in vhost_dev_prepare_inflight()
1956 if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { in vhost_dev_set_inflight()
1957 r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); in vhost_dev_set_inflight()
1972 if (dev->vhost_ops->vhost_get_inflight_fd) { in vhost_dev_get_inflight()
1973 r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); in vhost_dev_get_inflight()
1983 static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable) in vhost_dev_set_vring_enable() argument
1985 if (!hdev->vhost_ops->vhost_set_vring_enable) { in vhost_dev_set_vring_enable()
1990 * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not in vhost_dev_set_vring_enable()
1995 if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER && in vhost_dev_set_vring_enable()
1996 !virtio_has_feature(hdev->backend_features, in vhost_dev_set_vring_enable()
2001 return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable); in vhost_dev_set_vring_enable()
2007 * If @vrings is true, this function will enable all vrings before starting the
2016 assert(hdev->vhost_ops); in vhost_dev_start()
2018 trace_vhost_dev_start(hdev, vdev->name, vrings); in vhost_dev_start()
2020 vdev->vhost_started = true; in vhost_dev_start()
2021 hdev->started = true; in vhost_dev_start()
2022 hdev->vdev = vdev; in vhost_dev_start()
2024 r = vhost_dev_set_features(hdev, hdev->log_enabled); in vhost_dev_start()
2030 memory_listener_register(&hdev->iommu_listener, vdev->dma_as); in vhost_dev_start()
2033 r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); in vhost_dev_start()
2038 for (i = 0; i < hdev->nvqs; ++i) { in vhost_dev_start()
2041 hdev->vqs + i, in vhost_dev_start()
2042 hdev->vq_index + i); in vhost_dev_start()
2049 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0); in vhost_dev_start()
2055 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); in vhost_dev_start()
2056 if (!vdev->use_guest_notifier_mask) { in vhost_dev_start()
2059 if (hdev->log_enabled) { in vhost_dev_start()
2062 hdev->log_size = vhost_get_log_size(hdev); in vhost_dev_start()
2063 hdev->log = vhost_log_get(hdev->vhost_ops->backend_type, in vhost_dev_start()
2064 hdev->log_size, in vhost_dev_start()
2066 log_base = (uintptr_t)hdev->log->log; in vhost_dev_start()
2067 r = hdev->vhost_ops->vhost_set_log_base(hdev, in vhost_dev_start()
2068 hdev->log_size ? log_base : 0, in vhost_dev_start()
2069 hdev->log); in vhost_dev_start()
2082 if (hdev->vhost_ops->vhost_dev_start) { in vhost_dev_start()
2083 r = hdev->vhost_ops->vhost_dev_start(hdev, true); in vhost_dev_start()
2089 hdev->vhost_ops->vhost_set_iotlb_callback) { in vhost_dev_start()
2090 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true); in vhost_dev_start()
2093 * vhost-kernel code requires for this.*/ in vhost_dev_start()
2094 for (i = 0; i < hdev->nvqs; ++i) { in vhost_dev_start()
2095 struct vhost_virtqueue *vq = hdev->vqs + i; in vhost_dev_start()
2096 r = vhost_device_iotlb_miss(hdev, vq->used_phys, true); in vhost_dev_start()
2106 hdev->vhost_ops->vhost_set_iotlb_callback) { in vhost_dev_start()
2107 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); in vhost_dev_start()
2109 if (hdev->vhost_ops->vhost_dev_start) { in vhost_dev_start()
2110 hdev->vhost_ops->vhost_dev_start(hdev, false); in vhost_dev_start()
2119 while (--i >= 0) { in vhost_dev_start()
2122 hdev->vqs + i, in vhost_dev_start()
2123 hdev->vq_index + i); in vhost_dev_start()
2128 memory_listener_unregister(&hdev->iommu_listener); in vhost_dev_start()
2131 vdev->vhost_started = false; in vhost_dev_start()
2132 hdev->started = false; in vhost_dev_start()
2144 assert(hdev->vhost_ops); in do_vhost_dev_stop()
2146 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); in do_vhost_dev_stop()
2147 event_notifier_test_and_clear(&vdev->config_notifier); in do_vhost_dev_stop()
2149 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); in do_vhost_dev_stop()
2151 trace_vhost_dev_stop(hdev, vdev->name, vrings); in do_vhost_dev_stop()
2153 if (hdev->vhost_ops->vhost_dev_start) { in do_vhost_dev_stop()
2154 hdev->vhost_ops->vhost_dev_start(hdev, false); in do_vhost_dev_stop()
2159 for (i = 0; i < hdev->nvqs; ++i) { in do_vhost_dev_stop()
2162 hdev->vqs + i, in do_vhost_dev_stop()
2163 hdev->vq_index + i, in do_vhost_dev_stop()
2166 if (hdev->vhost_ops->vhost_reset_status) { in do_vhost_dev_stop()
2167 hdev->vhost_ops->vhost_reset_status(hdev); in do_vhost_dev_stop()
2171 if (hdev->vhost_ops->vhost_set_iotlb_callback) { in do_vhost_dev_stop()
2172 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); in do_vhost_dev_stop()
2174 memory_listener_unregister(&hdev->iommu_listener); in do_vhost_dev_stop()
2178 hdev->started = false; in do_vhost_dev_stop()
2179 vdev->vhost_started = false; in do_vhost_dev_stop()
2180 hdev->vdev = NULL; in do_vhost_dev_stop()
2198 if (hdev->vhost_ops->vhost_net_set_backend) { in vhost_net_set_backend()
2199 return hdev->vhost_ops->vhost_net_set_backend(hdev, file); in vhost_net_set_backend()
2202 return -ENOSYS; in vhost_net_set_backend()
2207 if (hdev->vhost_ops->vhost_reset_device) { in vhost_reset_device()
2208 return hdev->vhost_ops->vhost_reset_device(hdev); in vhost_reset_device()
2211 return -ENOSYS; in vhost_reset_device()
2216 if (dev->vhost_ops->vhost_supports_device_state) { in vhost_supports_device_state()
2217 return dev->vhost_ops->vhost_supports_device_state(dev); in vhost_supports_device_state()
2230 if (dev->vhost_ops->vhost_set_device_state_fd) { in vhost_set_device_state_fd()
2231 return dev->vhost_ops->vhost_set_device_state_fd(dev, direction, phase, in vhost_set_device_state_fd()
2237 return -ENOSYS; in vhost_set_device_state_fd()
2242 if (dev->vhost_ops->vhost_check_device_state) { in vhost_check_device_state()
2243 return dev->vhost_ops->vhost_check_device_state(dev, errp); in vhost_check_device_state()
2248 return -ENOSYS; in vhost_check_device_state()
2258 int pipe_fds[2], read_fd = -1, write_fd = -1, reply_fd = -1; in vhost_save_backend_state()
2261 /* [0] for reading (our end), [1] for writing (back-end's end) */ in vhost_save_backend_state()
2264 g_err->message); in vhost_save_backend_state()
2265 ret = -EINVAL; in vhost_save_backend_state()
2275 * vhost-user, so just check that it is stopped at all. in vhost_save_backend_state()
2277 assert(!dev->started); in vhost_save_backend_state()
2279 /* Transfer ownership of write_fd to the back-end */ in vhost_save_backend_state()
2291 /* If the back-end wishes to use a different pipe, switch over */ in vhost_save_backend_state()
2304 ret = -errno; in vhost_save_backend_state()
2305 error_setg_errno(errp, -ret, "Failed to receive state"); in vhost_save_backend_state()
2321 * Back-end will not really care, but be clean and close our end of the pipe in vhost_save_backend_state()
2322 * before inquiring the back-end about whether transfer was successful in vhost_save_backend_state()
2325 read_fd = -1; in vhost_save_backend_state()
2328 assert(!dev->started); in vhost_save_backend_state()
2350 int pipe_fds[2], read_fd = -1, write_fd = -1, reply_fd = -1; in vhost_load_backend_state()
2353 /* [0] for reading (back-end's end), [1] for writing (our end) */ in vhost_load_backend_state()
2356 g_err->message); in vhost_load_backend_state()
2357 ret = -EINVAL; in vhost_load_backend_state()
2367 * vhost-user, so just check that it is stopped at all. in vhost_load_backend_state()
2369 assert(!dev->started); in vhost_load_backend_state()
2371 /* Transfer ownership of read_fd to the back-end */ in vhost_load_backend_state()
2383 /* If the back-end wishes to use a different pipe, switch over */ in vhost_load_backend_state()
2408 ret = -EINVAL; in vhost_load_backend_state()
2418 ret = -errno; in vhost_load_backend_state()
2419 error_setg_errno(errp, -ret, "Failed to send state"); in vhost_load_backend_state()
2423 ret = -ECONNRESET; in vhost_load_backend_state()
2428 this_chunk_size -= write_ret; in vhost_load_backend_state()
2434 * Close our end, thus ending transfer, before inquiring the back-end about in vhost_load_backend_state()
2438 write_fd = -1; in vhost_load_backend_state()
2441 assert(!dev->started); in vhost_load_backend_state()
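The vhost_load_backend_state() lines show a chunked copy into the back-end's pipe with handling for short writes and a closed peer. A generic POSIX write-all loop of that shape (no QEMU types; the -ECONNRESET mapping for a zero-length write mirrors line 2423):

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

static int write_all(int fd, const void *buf, size_t len)
{
    const char *p = buf;

    while (len > 0) {
        ssize_t n = write(fd, p, len);

        if (n < 0) {
            if (errno == EINTR) {
                continue;        /* retry interrupted writes */
            }
            return -errno;       /* hand back a negative errno, QEMU-style */
        }
        if (n == 0) {
            return -ECONNRESET;  /* peer closed its end of the pipe */
        }
        p += n;
        len -= (size_t)n;
    }
    return 0;
}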