Lines matching refs: dev — a cross-reference of every use of the `dev` (VuDev *) parameter in libvhost-user (QEMU). Each entry gives the source line number, the matching code fragment, and the enclosing function; a minimal usage sketch follows the listing.

114 bool vu_has_feature(VuDev *dev,  in vu_has_feature()  argument
117 return has_feature(dev->features, fbit); in vu_has_feature()
120 static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit) in vu_has_protocol_feature() argument
122 return has_feature(dev->protocol_features, fbit); in vu_has_protocol_feature()
179 vu_panic(VuDev *dev, const char *msg, ...) in vu_panic() argument
190 dev->broken = true; in vu_panic()
191 dev->panic(dev, buf); in vu_panic()
202 vu_gpa_to_mem_region(VuDev *dev, uint64_t guest_addr) in vu_gpa_to_mem_region() argument
205 int high = dev->nregions - 1; in vu_gpa_to_mem_region()
217 VuDevRegion *cur = &dev->regions[mid]; in vu_gpa_to_mem_region()
234 vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr) in vu_gpa_to_va() argument
242 r = vu_gpa_to_mem_region(dev, guest_addr); in vu_gpa_to_va()
256 qva_to_va(VuDev *dev, uint64_t qemu_addr) in qva_to_va() argument
261 for (i = 0; i < dev->nregions; i++) { in qva_to_va()
262 VuDevRegion *r = &dev->regions[i]; in qva_to_va()
274 vu_remove_all_mem_regs(VuDev *dev) in vu_remove_all_mem_regs() argument
278 for (i = 0; i < dev->nregions; i++) { in vu_remove_all_mem_regs()
279 VuDevRegion *r = &dev->regions[i]; in vu_remove_all_mem_regs()
283 dev->nregions = 0; in vu_remove_all_mem_regs()
287 map_ring(VuDev *dev, VuVirtq *vq) in map_ring() argument
289 vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr); in map_ring()
290 vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr); in map_ring()
291 vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr); in map_ring()
302 vu_is_vq_usable(VuDev *dev, VuVirtq *vq) in vu_is_vq_usable() argument
304 if (unlikely(dev->broken)) { in vu_is_vq_usable()
322 if (map_ring(dev, vq)) { in vu_is_vq_usable()
323 vu_panic(dev, "remapping queue on access"); in vu_is_vq_usable()
330 unmap_rings(VuDev *dev, VuDevRegion *r) in unmap_rings() argument
334 for (i = 0; i < dev->max_queues; i++) { in unmap_rings()
335 VuVirtq *vq = &dev->vq[i]; in unmap_rings()
376 _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd) in _vu_add_mem_reg() argument
386 int high = dev->nregions - 1; in _vu_add_mem_reg()
389 DPRINT("Adding region %d\n", dev->nregions); in _vu_add_mem_reg()
399 if (dev->postcopy_listening) { in _vu_add_mem_reg()
414 VuDevRegion *cur = &dev->regions[mid]; in _vu_add_mem_reg()
418 vu_panic(dev, "regions with overlapping guest physical addresses"); in _vu_add_mem_reg()
457 vu_panic(dev, "region mmap error: %s", strerror(errno)); in _vu_add_mem_reg()
470 r = &dev->regions[idx]; in _vu_add_mem_reg()
471 memmove(r + 1, r, sizeof(VuDevRegion) * (dev->nregions - idx)); in _vu_add_mem_reg()
477 dev->nregions++; in _vu_add_mem_reg()
479 if (dev->postcopy_listening) { in _vu_add_mem_reg()
537 vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) in vu_message_read_default() argument
559 vu_panic(dev, "Error while recvmsg: %s", strerror(errno)); in vu_message_read_default()
578 vu_panic(dev, in vu_message_read_default()
591 vu_panic(dev, "Error while reading: %s", strerror(errno)); in vu_message_read_default()
607 vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) in vu_message_write() argument
643 vu_panic(dev, "Error while writing: %s", strerror(errno)); in vu_message_write()
658 vu_panic(dev, "Error while writing: %s", strerror(errno)); in vu_message_write()
666 vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) in vu_send_reply() argument
673 return vu_message_write(dev, conn_fd, vmsg); in vu_send_reply()
682 vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg) in vu_process_message_reply() argument
692 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { in vu_process_message_reply()
705 pthread_mutex_unlock(&dev->backend_mutex); in vu_process_message_reply()
711 vu_log_kick(VuDev *dev) in vu_log_kick() argument
713 if (dev->log_call_fd != -1) { in vu_log_kick()
715 if (eventfd_write(dev->log_call_fd, 1) < 0) { in vu_log_kick()
716 vu_panic(dev, "Error writing eventfd: %s", strerror(errno)); in vu_log_kick()
729 vu_log_write(VuDev *dev, uint64_t address, uint64_t length) in vu_log_write() argument
733 if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) || in vu_log_write()
734 !dev->log_table || !length) { in vu_log_write()
738 assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8)); in vu_log_write()
742 vu_log_page(dev->log_table, page); in vu_log_write()
746 vu_log_kick(dev); in vu_log_write()
750 vu_kick_cb(VuDev *dev, int condition, void *data) in vu_kick_cb() argument
753 VuVirtq *vq = &dev->vq[index]; in vu_kick_cb()
760 vu_panic(dev, "kick eventfd_read(): %s", strerror(errno)); in vu_kick_cb()
761 dev->remove_watch(dev, dev->vq[index].kick_fd); in vu_kick_cb()
766 vq->handler(dev, index); in vu_kick_cb()
772 vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_get_features_exec() argument
788 if (dev->iface->get_features) { in vu_get_features_exec()
789 vmsg->payload.u64 |= dev->iface->get_features(dev); in vu_get_features_exec()
801 vu_set_enable_all_rings(VuDev *dev, bool enabled) in vu_set_enable_all_rings() argument
805 for (i = 0; i < dev->max_queues; i++) { in vu_set_enable_all_rings()
806 dev->vq[i].enable = enabled; in vu_set_enable_all_rings()
811 vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_features_exec() argument
815 dev->features = vmsg->payload.u64; in vu_set_features_exec()
816 if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) { in vu_set_features_exec()
821 vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user"); in vu_set_features_exec()
825 if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) { in vu_set_features_exec()
826 vu_set_enable_all_rings(dev, true); in vu_set_features_exec()
829 if (dev->iface->set_features) { in vu_set_features_exec()
830 dev->iface->set_features(dev, dev->features); in vu_set_features_exec()
837 vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_owner_exec() argument
843 vu_close_log(VuDev *dev) in vu_close_log() argument
845 if (dev->log_table) { in vu_close_log()
846 if (munmap(dev->log_table, dev->log_size) != 0) { in vu_close_log()
850 dev->log_table = NULL; in vu_close_log()
852 if (dev->log_call_fd != -1) { in vu_close_log()
853 close(dev->log_call_fd); in vu_close_log()
854 dev->log_call_fd = -1; in vu_close_log()
859 vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_reset_device_exec() argument
861 vu_set_enable_all_rings(dev, false); in vu_reset_device_exec()
867 generate_faults(VuDev *dev) { in generate_faults() argument
869 for (i = 0; i < dev->nregions; i++) { in generate_faults()
871 VuDevRegion *dev_region = &dev->regions[i]; in generate_faults()
913 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) { in generate_faults()
914 vu_panic(dev, "%s: Failed to userfault region %d " in generate_faults()
920 dev->postcopy_ufd, strerror(errno)); in generate_faults()
924 vu_panic(dev, "%s Region (%d) doesn't support COPY", in generate_faults()
936 vu_panic(dev, "failed to mprotect region %d for postcopy (%s)", in generate_faults()
948 vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { in vu_add_mem_reg() argument
953 vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd " in vu_add_mem_reg()
960 vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at " in vu_add_mem_reg()
966 if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) { in vu_add_mem_reg()
968 vu_panic(dev, "failing attempt to hot add memory via " in vu_add_mem_reg()
979 if (dev->postcopy_listening && in vu_add_mem_reg()
982 (void)generate_faults(dev); in vu_add_mem_reg()
986 _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]); in vu_add_mem_reg()
989 if (dev->postcopy_listening) { in vu_add_mem_reg()
1012 vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { in vu_rem_mem_reg() argument
1019 vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd " in vu_rem_mem_reg()
1026 vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at " in vu_rem_mem_reg()
1042 r = vu_gpa_to_mem_region(dev, msg_region->guest_phys_addr); in vu_rem_mem_reg()
1045 vu_panic(dev, "Specified region not found\n"); in vu_rem_mem_reg()
1057 unmap_rings(dev, r); in vu_rem_mem_reg()
1061 idx = r - dev->regions; in vu_rem_mem_reg()
1062 assert(idx < dev->nregions); in vu_rem_mem_reg()
1064 memmove(r, r + 1, sizeof(VuDevRegion) * (dev->nregions - idx - 1)); in vu_rem_mem_reg()
1066 dev->nregions--; in vu_rem_mem_reg()
1074 vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg) in vu_get_shared_object() argument
1078 if (dev->iface->get_shared_object) { in vu_get_shared_object()
1079 dmabuf_fd = dev->iface->get_shared_object( in vu_get_shared_object()
1080 dev, &vmsg->payload.object.uuid[0]); in vu_get_shared_object()
1092 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_mem_table_exec() argument
1097 vu_remove_all_mem_regs(dev); in vu_set_mem_table_exec()
1101 _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]); in vu_set_mem_table_exec()
1105 if (dev->postcopy_listening) { in vu_set_mem_table_exec()
1108 if (!vu_send_reply(dev, dev->sock, vmsg)) { in vu_set_mem_table_exec()
1109 vu_panic(dev, "failed to respond to set-mem-table for postcopy"); in vu_set_mem_table_exec()
1117 if (!dev->read_msg(dev, dev->sock, vmsg) || in vu_set_mem_table_exec()
1120 vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table"); in vu_set_mem_table_exec()
1125 (void)generate_faults(dev); in vu_set_mem_table_exec()
1129 for (i = 0; i < dev->max_queues; i++) { in vu_set_mem_table_exec()
1130 if (dev->vq[i].vring.desc) { in vu_set_mem_table_exec()
1131 if (map_ring(dev, &dev->vq[i])) { in vu_set_mem_table_exec()
1132 vu_panic(dev, "remapping queue %d during setmemtable", i); in vu_set_mem_table_exec()
1141 vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_log_base_exec() argument
1149 vu_panic(dev, "Invalid log_base message"); in vu_set_log_base_exec()
1166 if (dev->log_table) { in vu_set_log_base_exec()
1167 munmap(dev->log_table, dev->log_size); in vu_set_log_base_exec()
1169 dev->log_table = rc; in vu_set_log_base_exec()
1170 dev->log_size = log_mmap_size; in vu_set_log_base_exec()
1179 vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_log_fd_exec() argument
1182 vu_panic(dev, "Invalid log_fd message"); in vu_set_log_fd_exec()
1186 if (dev->log_call_fd != -1) { in vu_set_log_fd_exec()
1187 close(dev->log_call_fd); in vu_set_log_fd_exec()
1189 dev->log_call_fd = vmsg->fds[0]; in vu_set_log_fd_exec()
1196 vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_num_exec() argument
1203 dev->vq[index].vring.num = num; in vu_set_vring_num_exec()
1209 vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_addr_exec() argument
1213 VuVirtq *vq = &dev->vq[index]; in vu_set_vring_addr_exec()
1228 if (map_ring(dev, vq)) { in vu_set_vring_addr_exec()
1229 vu_panic(dev, "Invalid vring_addr message"); in vu_set_vring_addr_exec()
1236 bool resume = dev->iface->queue_is_processed_in_order && in vu_set_vring_addr_exec()
1237 dev->iface->queue_is_processed_in_order(dev, index); in vu_set_vring_addr_exec()
1252 vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_base_exec() argument
1259 dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num; in vu_set_vring_base_exec()
1265 vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_get_vring_base_exec() argument
1270 vmsg->payload.state.num = dev->vq[index].last_avail_idx; in vu_get_vring_base_exec()
1273 dev->vq[index].started = false; in vu_get_vring_base_exec()
1274 if (dev->iface->queue_set_started) { in vu_get_vring_base_exec()
1275 dev->iface->queue_set_started(dev, index, false); in vu_get_vring_base_exec()
1278 if (dev->vq[index].call_fd != -1) { in vu_get_vring_base_exec()
1279 close(dev->vq[index].call_fd); in vu_get_vring_base_exec()
1280 dev->vq[index].call_fd = -1; in vu_get_vring_base_exec()
1282 if (dev->vq[index].kick_fd != -1) { in vu_get_vring_base_exec()
1283 dev->remove_watch(dev, dev->vq[index].kick_fd); in vu_get_vring_base_exec()
1284 close(dev->vq[index].kick_fd); in vu_get_vring_base_exec()
1285 dev->vq[index].kick_fd = -1; in vu_get_vring_base_exec()
1292 vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg) in vu_check_queue_msg_file() argument
1297 if (index >= dev->max_queues) { in vu_check_queue_msg_file()
1299 vu_panic(dev, "Invalid queue index: %u", index); in vu_check_queue_msg_file()
1310 vu_panic(dev, "Invalid fds in request: %d", vmsg->request); in vu_check_queue_msg_file()
1332 vu_check_queue_inflights(VuDev *dev, VuVirtq *vq) in vu_check_queue_inflights() argument
1336 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { in vu_check_queue_inflights()
1402 vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_kick_exec() argument
1409 if (!vu_check_queue_msg_file(dev, vmsg)) { in vu_set_vring_kick_exec()
1413 if (dev->vq[index].kick_fd != -1) { in vu_set_vring_kick_exec()
1414 dev->remove_watch(dev, dev->vq[index].kick_fd); in vu_set_vring_kick_exec()
1415 close(dev->vq[index].kick_fd); in vu_set_vring_kick_exec()
1416 dev->vq[index].kick_fd = -1; in vu_set_vring_kick_exec()
1419 dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0]; in vu_set_vring_kick_exec()
1420 DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index); in vu_set_vring_kick_exec()
1422 dev->vq[index].started = true; in vu_set_vring_kick_exec()
1423 if (dev->iface->queue_set_started) { in vu_set_vring_kick_exec()
1424 dev->iface->queue_set_started(dev, index, true); in vu_set_vring_kick_exec()
1427 if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) { in vu_set_vring_kick_exec()
1428 dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN, in vu_set_vring_kick_exec()
1432 dev->vq[index].kick_fd, index); in vu_set_vring_kick_exec()
1435 if (vu_check_queue_inflights(dev, &dev->vq[index])) { in vu_set_vring_kick_exec()
1436 vu_panic(dev, "Failed to check inflights for vq: %d\n", index); in vu_set_vring_kick_exec()
1442 void vu_set_queue_handler(VuDev *dev, VuVirtq *vq, in vu_set_queue_handler() argument
1445 int qidx = vq - dev->vq; in vu_set_queue_handler()
1450 dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN, in vu_set_queue_handler()
1453 dev->remove_watch(dev, vq->kick_fd); in vu_set_queue_handler()
1458 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd, in vu_set_queue_host_notifier() argument
1461 int qidx = vq - dev->vq; in vu_set_queue_host_notifier()
1482 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) { in vu_set_queue_host_notifier()
1486 pthread_mutex_lock(&dev->backend_mutex); in vu_set_queue_host_notifier()
1487 if (!vu_message_write(dev, dev->backend_fd, &vmsg)) { in vu_set_queue_host_notifier()
1488 pthread_mutex_unlock(&dev->backend_mutex); in vu_set_queue_host_notifier()
1493 return vu_process_message_reply(dev, &vmsg); in vu_set_queue_host_notifier()
1497 vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN], in vu_lookup_shared_object() argument
1510 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { in vu_lookup_shared_object()
1514 pthread_mutex_lock(&dev->backend_mutex); in vu_lookup_shared_object()
1515 if (!vu_message_write(dev, dev->backend_fd, &msg)) { in vu_lookup_shared_object()
1519 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { in vu_lookup_shared_object()
1538 pthread_mutex_unlock(&dev->backend_mutex); in vu_lookup_shared_object()
1544 vu_send_message(VuDev *dev, VhostUserMsg *vmsg) in vu_send_message() argument
1547 pthread_mutex_lock(&dev->backend_mutex); in vu_send_message()
1548 if (!vu_message_write(dev, dev->backend_fd, vmsg)) { in vu_send_message()
1554 pthread_mutex_unlock(&dev->backend_mutex); in vu_send_message()
1560 vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) in vu_add_shared_object() argument
1570 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { in vu_add_shared_object()
1574 return vu_send_message(dev, &msg); in vu_add_shared_object()
1578 vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) in vu_rm_shared_object() argument
1588 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { in vu_rm_shared_object()
1592 return vu_send_message(dev, &msg); in vu_rm_shared_object()
1596 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_call_exec() argument
1603 if (!vu_check_queue_msg_file(dev, vmsg)) { in vu_set_vring_call_exec()
1607 if (dev->vq[index].call_fd != -1) { in vu_set_vring_call_exec()
1608 close(dev->vq[index].call_fd); in vu_set_vring_call_exec()
1609 dev->vq[index].call_fd = -1; in vu_set_vring_call_exec()
1612 dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0]; in vu_set_vring_call_exec()
1615 if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) { in vu_set_vring_call_exec()
1619 DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index); in vu_set_vring_call_exec()
1625 vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_err_exec() argument
1632 if (!vu_check_queue_msg_file(dev, vmsg)) { in vu_set_vring_err_exec()
1636 if (dev->vq[index].err_fd != -1) { in vu_set_vring_err_exec()
1637 close(dev->vq[index].err_fd); in vu_set_vring_err_exec()
1638 dev->vq[index].err_fd = -1; in vu_set_vring_err_exec()
1641 dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0]; in vu_set_vring_err_exec()
1647 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_get_protocol_features_exec() argument
1669 if (dev->iface->get_config && dev->iface->set_config) { in vu_get_protocol_features_exec()
1673 if (dev->iface->get_protocol_features) { in vu_get_protocol_features_exec()
1674 features |= dev->iface->get_protocol_features(dev); in vu_get_protocol_features_exec()
1693 vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_protocol_features_exec() argument
1699 dev->protocol_features = vmsg->payload.u64; in vu_set_protocol_features_exec()
1701 if (vu_has_protocol_feature(dev, in vu_set_protocol_features_exec()
1703 (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) || in vu_set_protocol_features_exec()
1704 !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) { in vu_set_protocol_features_exec()
1715 vu_panic(dev, in vu_set_protocol_features_exec()
1720 if (dev->iface->set_protocol_features) { in vu_set_protocol_features_exec()
1721 dev->iface->set_protocol_features(dev, features); in vu_set_protocol_features_exec()
1728 vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_get_queue_num_exec() argument
1730 vmsg_set_reply_u64(vmsg, dev->max_queues); in vu_get_queue_num_exec()
1735 vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg) in vu_set_vring_enable_exec() argument
1743 if (index >= dev->max_queues) { in vu_set_vring_enable_exec()
1744 vu_panic(dev, "Invalid vring_enable index: %u", index); in vu_set_vring_enable_exec()
1748 dev->vq[index].enable = enable; in vu_set_vring_enable_exec()
1753 vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg) in vu_set_backend_req_fd() argument
1756 vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num); in vu_set_backend_req_fd()
1760 if (dev->backend_fd != -1) { in vu_set_backend_req_fd()
1761 close(dev->backend_fd); in vu_set_backend_req_fd()
1763 dev->backend_fd = vmsg->fds[0]; in vu_set_backend_req_fd()
1770 vu_get_config(VuDev *dev, VhostUserMsg *vmsg) in vu_get_config() argument
1774 if (dev->iface->get_config) { in vu_get_config()
1775 ret = dev->iface->get_config(dev, vmsg->payload.config.region, in vu_get_config()
1788 vu_set_config(VuDev *dev, VhostUserMsg *vmsg) in vu_set_config() argument
1792 if (dev->iface->set_config) { in vu_set_config()
1793 ret = dev->iface->set_config(dev, vmsg->payload.config.region, in vu_set_config()
1798 vu_panic(dev, "Set virtio configuration space failed"); in vu_set_config()
1806 vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg) in vu_set_postcopy_advise() argument
1811 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); in vu_set_postcopy_advise()
1814 dev->postcopy_ufd = -1; in vu_set_postcopy_advise()
1817 if (dev->postcopy_ufd == -1) { in vu_set_postcopy_advise()
1818 vu_panic(dev, "Userfaultfd not available: %s", strerror(errno)); in vu_set_postcopy_advise()
1825 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { in vu_set_postcopy_advise()
1826 vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno)); in vu_set_postcopy_advise()
1827 close(dev->postcopy_ufd); in vu_set_postcopy_advise()
1828 dev->postcopy_ufd = -1; in vu_set_postcopy_advise()
1837 vmsg->fds[0] = dev->postcopy_ufd; in vu_set_postcopy_advise()
1842 vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg) in vu_set_postcopy_listen() argument
1844 if (dev->nregions) { in vu_set_postcopy_listen()
1845 vu_panic(dev, "Regions already registered at postcopy-listen"); in vu_set_postcopy_listen()
1849 dev->postcopy_listening = true; in vu_set_postcopy_listen()
1856 vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg) in vu_set_postcopy_end() argument
1859 dev->postcopy_listening = false; in vu_set_postcopy_end()
1860 if (dev->postcopy_ufd > 0) { in vu_set_postcopy_end()
1861 close(dev->postcopy_ufd); in vu_set_postcopy_end()
1862 dev->postcopy_ufd = -1; in vu_set_postcopy_end()
1913 vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) in vu_get_inflight_fd() argument
1921 vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size); in vu_get_inflight_fd()
1939 vu_panic(dev, "Not implemented: memfd support is missing"); in vu_get_inflight_fd()
1943 vu_panic(dev, "Failed to alloc vhost inflight area"); in vu_get_inflight_fd()
1950 dev->inflight_info.addr = addr; in vu_get_inflight_fd()
1951 dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size; in vu_get_inflight_fd()
1952 dev->inflight_info.fd = vmsg->fds[0] = fd; in vu_get_inflight_fd()
1965 vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) in vu_set_inflight_fd() argument
1974 vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d", in vu_set_inflight_fd()
1994 vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno)); in vu_set_inflight_fd()
1998 if (dev->inflight_info.fd) { in vu_set_inflight_fd()
1999 close(dev->inflight_info.fd); in vu_set_inflight_fd()
2002 if (dev->inflight_info.addr) { in vu_set_inflight_fd()
2003 munmap(dev->inflight_info.addr, dev->inflight_info.size); in vu_set_inflight_fd()
2006 dev->inflight_info.fd = fd; in vu_set_inflight_fd()
2007 dev->inflight_info.addr = rc; in vu_set_inflight_fd()
2008 dev->inflight_info.size = mmap_size; in vu_set_inflight_fd()
2011 dev->vq[i].inflight = (VuVirtqInflight *)rc; in vu_set_inflight_fd()
2012 dev->vq[i].inflight->desc_num = queue_size; in vu_set_inflight_fd()
2020 vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg) in vu_handle_vring_kick() argument
2024 if (index >= dev->max_queues) { in vu_handle_vring_kick()
2025 vu_panic(dev, "Invalid queue index: %u", index); in vu_handle_vring_kick()
2030 dev->vq[index].handler, index); in vu_handle_vring_kick()
2032 if (!dev->vq[index].started) { in vu_handle_vring_kick()
2033 dev->vq[index].started = true; in vu_handle_vring_kick()
2035 if (dev->iface->queue_set_started) { in vu_handle_vring_kick()
2036 dev->iface->queue_set_started(dev, index, true); in vu_handle_vring_kick()
2040 if (dev->vq[index].handler) { in vu_handle_vring_kick()
2041 dev->vq[index].handler(dev, index); in vu_handle_vring_kick()
2047 static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg) in vu_handle_get_max_memslots() argument
2057 vu_process_message(VuDev *dev, VhostUserMsg *vmsg) in vu_process_message() argument
2077 if (dev->iface->process_msg && in vu_process_message()
2078 dev->iface->process_msg(dev, vmsg, &do_reply)) { in vu_process_message()
2084 return vu_get_features_exec(dev, vmsg); in vu_process_message()
2086 return vu_set_features_exec(dev, vmsg); in vu_process_message()
2088 return vu_get_protocol_features_exec(dev, vmsg); in vu_process_message()
2090 return vu_set_protocol_features_exec(dev, vmsg); in vu_process_message()
2092 return vu_set_owner_exec(dev, vmsg); in vu_process_message()
2094 return vu_reset_device_exec(dev, vmsg); in vu_process_message()
2096 return vu_set_mem_table_exec(dev, vmsg); in vu_process_message()
2098 return vu_set_log_base_exec(dev, vmsg); in vu_process_message()
2100 return vu_set_log_fd_exec(dev, vmsg); in vu_process_message()
2102 return vu_set_vring_num_exec(dev, vmsg); in vu_process_message()
2104 return vu_set_vring_addr_exec(dev, vmsg); in vu_process_message()
2106 return vu_set_vring_base_exec(dev, vmsg); in vu_process_message()
2108 return vu_get_vring_base_exec(dev, vmsg); in vu_process_message()
2110 return vu_set_vring_kick_exec(dev, vmsg); in vu_process_message()
2112 return vu_set_vring_call_exec(dev, vmsg); in vu_process_message()
2114 return vu_set_vring_err_exec(dev, vmsg); in vu_process_message()
2116 return vu_get_queue_num_exec(dev, vmsg); in vu_process_message()
2118 return vu_set_vring_enable_exec(dev, vmsg); in vu_process_message()
2120 return vu_set_backend_req_fd(dev, vmsg); in vu_process_message()
2122 return vu_get_config(dev, vmsg); in vu_process_message()
2124 return vu_set_config(dev, vmsg); in vu_process_message()
2129 return vu_set_postcopy_advise(dev, vmsg); in vu_process_message()
2131 return vu_set_postcopy_listen(dev, vmsg); in vu_process_message()
2133 return vu_set_postcopy_end(dev, vmsg); in vu_process_message()
2135 return vu_get_inflight_fd(dev, vmsg); in vu_process_message()
2137 return vu_set_inflight_fd(dev, vmsg); in vu_process_message()
2139 return vu_handle_vring_kick(dev, vmsg); in vu_process_message()
2141 return vu_handle_get_max_memslots(dev, vmsg); in vu_process_message()
2143 return vu_add_mem_reg(dev, vmsg); in vu_process_message()
2145 return vu_rem_mem_reg(dev, vmsg); in vu_process_message()
2147 return vu_get_shared_object(dev, vmsg); in vu_process_message()
2150 vu_panic(dev, "Unhandled request: %d", vmsg->request); in vu_process_message()
2157 vu_dispatch(VuDev *dev) in vu_dispatch() argument
2163 if (!dev->read_msg(dev, dev->sock, &vmsg)) { in vu_dispatch()
2169 reply_requested = vu_process_message(dev, &vmsg); in vu_dispatch()
2180 if (!vu_send_reply(dev, dev->sock, &vmsg)) { in vu_dispatch()
2192 vu_deinit(VuDev *dev) in vu_deinit() argument
2196 vu_remove_all_mem_regs(dev); in vu_deinit()
2198 for (i = 0; i < dev->max_queues; i++) { in vu_deinit()
2199 VuVirtq *vq = &dev->vq[i]; in vu_deinit()
2207 dev->remove_watch(dev, vq->kick_fd); in vu_deinit()
2225 if (dev->inflight_info.addr) { in vu_deinit()
2226 munmap(dev->inflight_info.addr, dev->inflight_info.size); in vu_deinit()
2227 dev->inflight_info.addr = NULL; in vu_deinit()
2230 if (dev->inflight_info.fd > 0) { in vu_deinit()
2231 close(dev->inflight_info.fd); in vu_deinit()
2232 dev->inflight_info.fd = -1; in vu_deinit()
2235 vu_close_log(dev); in vu_deinit()
2236 if (dev->backend_fd != -1) { in vu_deinit()
2237 close(dev->backend_fd); in vu_deinit()
2238 dev->backend_fd = -1; in vu_deinit()
2240 pthread_mutex_destroy(&dev->backend_mutex); in vu_deinit()
2242 if (dev->sock != -1) { in vu_deinit()
2243 close(dev->sock); in vu_deinit()
2246 free(dev->vq); in vu_deinit()
2247 dev->vq = NULL; in vu_deinit()
2248 free(dev->regions); in vu_deinit()
2249 dev->regions = NULL; in vu_deinit()
2253 vu_init(VuDev *dev, in vu_init() argument
2271 memset(dev, 0, sizeof(*dev)); in vu_init()
2273 dev->sock = socket; in vu_init()
2274 dev->panic = panic; in vu_init()
2275 dev->read_msg = read_msg ? read_msg : vu_message_read_default; in vu_init()
2276 dev->set_watch = set_watch; in vu_init()
2277 dev->remove_watch = remove_watch; in vu_init()
2278 dev->iface = iface; in vu_init()
2279 dev->log_call_fd = -1; in vu_init()
2280 pthread_mutex_init(&dev->backend_mutex, NULL); in vu_init()
2281 dev->backend_fd = -1; in vu_init()
2282 dev->max_queues = max_queues; in vu_init()
2284 dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0])); in vu_init()
2285 if (!dev->regions) { in vu_init()
2290 dev->vq = malloc(max_queues * sizeof(dev->vq[0])); in vu_init()
2291 if (!dev->vq) { in vu_init()
2293 free(dev->regions); in vu_init()
2294 dev->regions = NULL; in vu_init()
2299 dev->vq[i] = (VuVirtq) { in vu_init()
2309 vu_get_queue(VuDev *dev, int qidx) in vu_get_queue() argument
2311 assert(qidx < dev->max_queues); in vu_get_queue()
2312 return &dev->vq[qidx]; in vu_get_queue()
2316 vu_queue_enabled(VuDev *dev, VuVirtq *vq) in vu_queue_enabled() argument
2322 vu_queue_started(const VuDev *dev, const VuVirtq *vq) in vu_queue_started() argument
2354 virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx) in virtqueue_num_heads() argument
2360 vu_panic(dev, "Guest moved used index from %u to %u", in virtqueue_num_heads()
2374 virtqueue_get_head(VuDev *dev, VuVirtq *vq, in virtqueue_get_head() argument
2383 vu_panic(dev, "Guest says index %u is available", *head); in virtqueue_get_head()
2391 virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc, in virtqueue_read_indirect_desc() argument
2407 ori_desc = vu_gpa_to_va(dev, &read_len, addr); in virtqueue_read_indirect_desc()
2428 virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc, in virtqueue_read_next_desc() argument
2442 vu_panic(dev, "Desc next is %u", *next); in virtqueue_read_next_desc()
2450 vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, in vu_queue_get_avail_bytes() argument
2461 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_get_avail_bytes()
2465 while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) { in vu_queue_get_avail_bytes()
2474 if (!virtqueue_get_head(dev, vq, idx++, &i)) { in vu_queue_get_avail_bytes()
2481 vu_panic(dev, "Invalid size for indirect buffer table"); in vu_queue_get_avail_bytes()
2487 vu_panic(dev, "Looped descriptor"); in vu_queue_get_avail_bytes()
2497 desc = vu_gpa_to_va(dev, &read_len, desc_addr); in vu_queue_get_avail_bytes()
2501 if (!virtqueue_read_indirect_desc(dev, desc_buf, in vu_queue_get_avail_bytes()
2508 vu_panic(dev, "Invalid indirect buffer table"); in vu_queue_get_avail_bytes()
2517 vu_panic(dev, "Looped descriptor"); in vu_queue_get_avail_bytes()
2529 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); in vu_queue_get_avail_bytes()
2560 vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes, in vu_queue_avail_bytes() argument
2565 vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total, in vu_queue_avail_bytes()
2574 vu_queue_empty(VuDev *dev, VuVirtq *vq) in vu_queue_empty() argument
2576 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_empty()
2588 vring_notify(VuDev *dev, VuVirtq *vq) in vring_notify() argument
2597 if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) && in vring_notify()
2598 !vq->inuse && vu_queue_empty(dev, vq)) { in vring_notify()
2602 if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { in vring_notify()
2613 static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync) in _vu_queue_notify() argument
2615 if (!vu_is_vq_usable(dev, vq)) { in _vu_queue_notify()
2619 if (!vring_notify(dev, vq)) { in _vu_queue_notify()
2625 vu_has_protocol_feature(dev, in _vu_queue_notify()
2627 vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) { in _vu_queue_notify()
2633 .index = vq - dev->vq, in _vu_queue_notify()
2637 vu_has_protocol_feature(dev, in _vu_queue_notify()
2644 vu_message_write(dev, dev->backend_fd, &vmsg); in _vu_queue_notify()
2646 vu_message_read_default(dev, dev->backend_fd, &vmsg); in _vu_queue_notify()
2652 vu_panic(dev, "Error writing eventfd: %s", strerror(errno)); in _vu_queue_notify()
2656 void vu_queue_notify(VuDev *dev, VuVirtq *vq) in vu_queue_notify() argument
2658 _vu_queue_notify(dev, vq, false); in vu_queue_notify()
2661 void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq) in vu_queue_notify_sync() argument
2663 _vu_queue_notify(dev, vq, true); in vu_queue_notify_sync()
2666 void vu_config_change_msg(VuDev *dev) in vu_config_change_msg() argument
2673 vu_message_write(dev, dev->backend_fd, &vmsg); in vu_config_change_msg()
2709 vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable) in vu_queue_set_notification() argument
2712 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { in vu_queue_set_notification()
2726 virtqueue_map_desc(VuDev *dev, in virtqueue_map_desc() argument
2736 vu_panic(dev, "virtio: zero sized buffers are not allowed"); in virtqueue_map_desc()
2744 vu_panic(dev, "virtio: too many descriptors in indirect table"); in virtqueue_map_desc()
2748 iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa); in virtqueue_map_desc()
2750 vu_panic(dev, "virtio: invalid address for buffers"); in virtqueue_map_desc()
2786 vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) in vu_queue_map_desc() argument
2801 vu_panic(dev, "Invalid size for indirect buffer table"); in vu_queue_map_desc()
2810 desc = vu_gpa_to_va(dev, &read_len, desc_addr); in vu_queue_map_desc()
2814 if (!virtqueue_read_indirect_desc(dev, desc_buf, in vu_queue_map_desc()
2821 vu_panic(dev, "Invalid indirect buffer table"); in vu_queue_map_desc()
2830 if (!virtqueue_map_desc(dev, &in_num, iov + out_num, in vu_queue_map_desc()
2838 vu_panic(dev, "Incorrect order for descriptors"); in vu_queue_map_desc()
2841 if (!virtqueue_map_desc(dev, &out_num, iov, in vu_queue_map_desc()
2851 vu_panic(dev, "Looped descriptor"); in vu_queue_map_desc()
2854 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); in vu_queue_map_desc()
2858 vu_panic(dev, "read descriptor error"); in vu_queue_map_desc()
2879 vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx) in vu_queue_inflight_get() argument
2881 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { in vu_queue_inflight_get()
2896 vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx) in vu_queue_inflight_pre_put() argument
2898 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { in vu_queue_inflight_pre_put()
2912 vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx) in vu_queue_inflight_post_put() argument
2914 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { in vu_queue_inflight_post_put()
2934 vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz) in vu_queue_pop() argument
2940 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_pop()
2946 elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz); in vu_queue_pop()
2956 if (vu_queue_empty(dev, vq)) { in vu_queue_pop()
2966 vu_panic(dev, "Virtqueue size exceeded"); in vu_queue_pop()
2970 if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) { in vu_queue_pop()
2974 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { in vu_queue_pop()
2978 elem = vu_queue_map_desc(dev, vq, head, sz); in vu_queue_pop()
2986 vu_queue_inflight_get(dev, vq, head); in vu_queue_pop()
2992 vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, in vu_queue_detach_element() argument
3000 vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, in vu_queue_unpop() argument
3004 vu_queue_detach_element(dev, vq, elem, len); in vu_queue_unpop()
3008 vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num) in vu_queue_rewind() argument
3019 void vring_used_write(VuDev *dev, VuVirtq *vq, in vring_used_write() argument
3025 vu_log_write(dev, vq->vring.log_guest_addr + in vring_used_write()
3032 vu_log_queue_fill(VuDev *dev, VuVirtq *vq, in vu_log_queue_fill() argument
3047 vu_panic(dev, "Invalid size for indirect buffer table"); in vu_log_queue_fill()
3056 desc = vu_gpa_to_va(dev, &read_len, desc_addr); in vu_log_queue_fill()
3060 if (!virtqueue_read_indirect_desc(dev, desc_buf, in vu_log_queue_fill()
3067 vu_panic(dev, "Invalid indirect buffer table"); in vu_log_queue_fill()
3075 vu_panic(dev, "Looped descriptor"); in vu_log_queue_fill()
3081 vu_log_write(dev, le64toh(desc[i].addr), min); in vu_log_queue_fill()
3086 (virtqueue_read_next_desc(dev, desc, i, max, &i) in vu_log_queue_fill()
3091 vu_queue_fill(VuDev *dev, VuVirtq *vq, in vu_queue_fill() argument
3097 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_fill()
3101 vu_log_queue_fill(dev, vq, elem, len); in vu_queue_fill()
3107 vring_used_write(dev, vq, &uelem, idx); in vu_queue_fill()
3111 void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val) in vring_used_idx_set() argument
3114 vu_log_write(dev, in vring_used_idx_set()
3122 vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count) in vu_queue_flush() argument
3126 if (!vu_is_vq_usable(dev, vq)) { in vu_queue_flush()
3135 vring_used_idx_set(dev, vq, new); in vu_queue_flush()
3143 vu_queue_push(VuDev *dev, VuVirtq *vq, in vu_queue_push() argument
3146 vu_queue_fill(dev, vq, elem, len, 0); in vu_queue_push()
3147 vu_queue_inflight_pre_put(dev, vq, elem->index); in vu_queue_push()
3148 vu_queue_flush(dev, vq, 1); in vu_queue_push()
3149 vu_queue_inflight_post_put(dev, vq, elem->index); in vu_queue_push()
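
For orientation, the sketch below shows how a backend typically drives the functions that appear in the listing (vu_init, vu_set_queue_handler, vu_dispatch, vu_queue_pop/push/notify, vu_deinit). It is a minimal sketch, not reference code: it assumes the prototypes declared in libvhost-user.h match these definitions, the event-loop callbacks (set_watch_cb/remove_watch_cb) are empty placeholders rather than real glue, and example_main, the single queue, and the zero-length reply are all hypothetical.

```c
/* Sketch only: a minimal vhost-user backend built on the API listed above.
 * Assumes the declarations from libvhost-user.h; event-loop glue is stubbed. */
#include <stdio.h>
#include <stdlib.h>
#include "libvhost-user.h"

static void panic_cb(VuDev *dev, const char *err)
{
    fprintf(stderr, "vu_panic: %s\n", err);
    exit(EXIT_FAILURE);
}

/* A real backend registers fd with its event loop and invokes cb() when the
 * fd becomes readable (e.g. a queue kick); stubbed here for brevity. */
static void set_watch_cb(VuDev *dev, int fd, int condition,
                         vu_watch_cb cb, void *data) { }
static void remove_watch_cb(VuDev *dev, int fd) { }

/* Per-queue handler: runs when the guest kicks the queue. */
static void queue_handler(VuDev *dev, int qidx)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;

    /* vu_queue_pop() maps the next descriptor chain into elem->out_sg/in_sg. */
    while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
        /* ... read elem->out_sg, fill elem->in_sg as needed ... */
        vu_queue_push(dev, vq, elem, 0);   /* 0 bytes written back (example) */
        free(elem);
    }
    vu_queue_notify(dev, vq);
}

static void queue_set_started(VuDev *dev, int qidx, bool started)
{
    vu_set_queue_handler(dev, vu_get_queue(dev, qidx),
                         started ? queue_handler : NULL);
}

static const VuDevIface iface = {
    .queue_set_started = queue_set_started,
};

int example_main(int socket_fd)  /* hypothetical entry point */
{
    VuDev dev;

    if (!vu_init(&dev, 1 /* max_queues */, socket_fd, panic_cb,
                 NULL /* use vu_message_read_default */,
                 set_watch_cb, remove_watch_cb, &iface)) {
        return 1;
    }

    /* Process vhost-user messages from the front-end until it disconnects. */
    while (vu_dispatch(&dev)) {
        /* continue */
    }

    vu_deinit(&dev);
    return 0;
}
```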