Lines Matching +full:inter +full:- +full:data

2  * Inter-VM Shared Memory PCI device.
16 * Contributions after 2012-01-13 are licensed under the terms of the
25 #include "hw/qdev-properties.h"
26 #include "hw/qdev-properties-system.h"
32 #include "qemu/error-report.h"
36 #include "chardev/char-fe.h"
60 #define TYPE_IVSHMEM_COMMON "ivshmem-common"
65 #define TYPE_IVSHMEM_PLAIN "ivshmem-plain"
69 #define TYPE_IVSHMEM_DOORBELL "ivshmem-doorbell"
122 /* registers for the Inter-VM shared memory device */
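The comment above heads the device's MMIO register block. For reference, the offsets follow QEMU's ivshmem specification (docs/specs/ivshmem-spec); a minimal sketch of the layout, stated from the spec rather than quoted from this file:

    /* BAR 0 register offsets per the ivshmem device specification */
    enum ivshmem_registers {
        INTRMASK   = 0,   /* Interrupt Mask: legacy INTx only */
        INTRSTATUS = 4,   /* Interrupt Status: legacy INTx only */
        IVPOSITION = 8,   /* read-only: this peer's own ID */
        DOORBELL   = 12,  /* write-only: signal a peer's vector */
    };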
132 return (ivs->features & (1 << feature)); in ivshmem_has_feature()
137 assert(s->master != ON_OFF_AUTO_AUTO); in ivshmem_is_master()
138 return s->master == ON_OFF_AUTO_ON; in ivshmem_is_master()
145 s->intrmask = val; in ivshmem_IntrMask_write()
150 uint32_t ret = s->intrmask; in ivshmem_IntrMask_read()
160 s->intrstatus = val; in ivshmem_IntrStatus_write()
165 uint32_t ret = s->intrstatus; in ivshmem_IntrStatus_read()
168 s->intrstatus = 0; in ivshmem_IntrStatus_read()
195 if (dest >= s->nb_peers) { in ivshmem_io_write()
201 if (vector < s->peers[dest].nb_eventfds) { in ivshmem_io_write()
203 event_notifier_set(&s->peers[dest].eventfds[vector]); in ivshmem_io_write()
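The ivshmem_io_write() hits above are the doorbell path: the written value names a destination peer and a vector, and QEMU signals that peer's corresponding eventfd. Per the ivshmem spec, the upper 16 bits carry the peer ID and the lower bits the vector number. An illustrative guest-side sketch; bar0 is a hypothetical pointer to the mapped register BAR and is not part of this file:

    /* Hypothetical guest-side helpers built on the register layout above. */
    static inline uint32_t ivshmem_my_id(volatile uint32_t *bar0)
    {
        return bar0[IVPOSITION / 4];            /* read-only peer ID */
    }

    static inline void ivshmem_ring(volatile uint32_t *bar0,
                                    uint16_t peer_id, uint16_t vector)
    {
        /* upper 16 bits: destination peer, lower bits: vector number */
        bar0[DOORBELL / 4] = ((uint32_t)peer_id << 16) | vector;
    }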
232 ret = s->vm_id; in ivshmem_io_read()
256 PCIDevice *pdev = entry->pdev; in ivshmem_vector_notify()
258 int vector = entry - s->msi_vectors; in ivshmem_vector_notify()
259 EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; in ivshmem_vector_notify()
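ivshmem_vector_notify() is the delivery path used when a vector's eventfd is serviced inside QEMU rather than handed to KVM as an irqfd. A hedged reconstruction of that path from the fragments above (deliver_notification is an illustrative name, not the function in this file):

    /* Hedged sketch: push a peer notification into the guest when the
     * vector's eventfd fires in QEMU (no KVM irqfd fast path). */
    static void deliver_notification(IVShmemState *s, PCIDevice *pdev, int vector)
    {
        EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];

        if (!event_notifier_test_and_clear(n)) {
            return;                              /* nothing pending */
        }
        if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
            if (msix_enabled(pdev)) {
                msix_notify(pdev, vector);       /* MSI-X message */
            }
        } else {
            ivshmem_IntrStatus_write(s, 1);      /* legacy path via IntrStatus/IntrMask */
        }
    }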
279 EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; in ivshmem_vector_unmask()
280 MSIVector *v = &s->msi_vectors[vector]; in ivshmem_vector_unmask()
284 if (!v->pdev) { in ivshmem_vector_unmask()
286 return -EINVAL; in ivshmem_vector_unmask()
288 assert(!v->unmasked); in ivshmem_vector_unmask()
290 ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev); in ivshmem_vector_unmask()
296 ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq); in ivshmem_vector_unmask()
300 v->unmasked = true; in ivshmem_vector_unmask()
308 EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; in ivshmem_vector_mask()
309 MSIVector *v = &s->msi_vectors[vector]; in ivshmem_vector_mask()
313 if (!v->pdev) { in ivshmem_vector_mask()
317 assert(v->unmasked); in ivshmem_vector_mask()
319 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, v->virq); in ivshmem_vector_mask()
324 v->unmasked = false; in ivshmem_vector_mask()
334 IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end); in ivshmem_vector_poll()
336 vector_end = MIN(vector_end, s->vectors); in ivshmem_vector_poll()
339 EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector]; in ivshmem_vector_poll()
356 assert(!s->msi_vectors[vector].pdev); in watch_vector_notifier()
357 s->msi_vectors[vector].pdev = PCI_DEVICE(s); in watch_vector_notifier()
360 NULL, &s->msi_vectors[vector]); in watch_vector_notifier()
365 memory_region_add_eventfd(&s->ivshmem_mmio, in ivshmem_add_eventfd()
370 &s->peers[posn].eventfds[i]); in ivshmem_add_eventfd()
375 memory_region_del_eventfd(&s->ivshmem_mmio, in ivshmem_del_eventfd()
380 &s->peers[posn].eventfds[i]); in ivshmem_del_eventfd()
387 assert(posn >= 0 && posn < s->nb_peers); in close_peer_eventfds()
388 n = s->peers[posn].nb_eventfds; in close_peer_eventfds()
399 event_notifier_cleanup(&s->peers[posn].eventfds[i]); in close_peer_eventfds()
402 g_free(s->peers[posn].eventfds); in close_peer_eventfds()
403 s->peers[posn].nb_eventfds = 0; in close_peer_eventfds()
408 int old_nb_peers = s->nb_peers; in resize_peers()
414 s->peers = g_renew(Peer, s->peers, nb_peers); in resize_peers()
415 s->nb_peers = nb_peers; in resize_peers()
418 s->peers[i].eventfds = g_new0(EventNotifier, s->vectors); in resize_peers()
419 s->peers[i].nb_eventfds = 0; in resize_peers()
431 assert(!s->msi_vectors[vector].pdev); in ivshmem_add_kvm_msi_virq()
441 s->msi_vectors[vector].virq = ret; in ivshmem_add_kvm_msi_virq()
442 s->msi_vectors[vector].pdev = pdev; in ivshmem_add_kvm_msi_virq()
447 EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; in setup_interrupt()
468 s->msi_vectors[vector].virq); in setup_interrupt()
487 if (s->ivshmem_bar2) { in process_msg_shmem()
503 if (!memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s), in process_msg_shmem()
509 s->ivshmem_bar2 = &s->server_bar2; in process_msg_shmem()
516 if (posn >= s->nb_peers || posn == s->vm_id) { in process_msg_disconnect()
526 Peer *peer = &s->peers[posn]; in process_msg_connect()
530 * The N-th connect message for this peer comes with the file in process_msg_connect()
531 * descriptor for vector N-1. Count messages to find the vector. in process_msg_connect()
533 if (peer->nb_eventfds >= s->vectors) { in process_msg_connect()
535 s->vectors); in process_msg_connect()
539 vector = peer->nb_eventfds++; in process_msg_connect()
542 event_notifier_init_fd(&peer->eventfds[vector], fd); in process_msg_connect()
545 if (posn == s->vm_id) { in process_msg_connect()
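As the process_msg_connect() comment notes, the server never says which vector an eventfd belongs to; the client infers it by counting connect messages per peer. A hedged sketch of that bookkeeping, using the Peer/IVShmemState fields visible in the hits (assign_vector is an illustrative name):

    /* Hedged sketch: the vector index is just how many eventfds this peer
     * has delivered so far (the N-th message carries vector N-1). */
    static int assign_vector(IVShmemState *s, uint16_t posn, int fd, Error **errp)
    {
        Peer *peer = &s->peers[posn];
        int vector;

        if (peer->nb_eventfds >= s->vectors) {
            error_setg(errp, "Too many eventfd received, device has %d vectors",
                       s->vectors);
            return -1;
        }
        vector = peer->nb_eventfds++;
        event_notifier_init_fd(&peer->eventfds[vector], fd);
        return vector;
    }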
559 if (msg < -1 || msg > IVSHMEM_MAX_PEERS) { in process_msg()
567 if (msg == -1) { in process_msg()
572 if (msg >= s->nb_peers) { in process_msg()
587 assert(s->msg_buffered_bytes < sizeof(s->msg_buf)); in ivshmem_can_receive()
588 return sizeof(s->msg_buf) - s->msg_buffered_bytes; in ivshmem_can_receive()
598 assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf)); in ivshmem_read()
599 memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size); in ivshmem_read()
600 s->msg_buffered_bytes += size; in ivshmem_read()
601 if (s->msg_buffered_bytes < sizeof(s->msg_buf)) { in ivshmem_read()
604 msg = le64_to_cpu(s->msg_buf); in ivshmem_read()
605 s->msg_buffered_bytes = 0; in ivshmem_read()
607 fd = qemu_chr_fe_get_msgfd(&s->server_chr); in ivshmem_read()
622 ret = qemu_chr_fe_read_all(&s->server_chr, (uint8_t *)&msg + n, in ivshmem_recv_msg()
623 sizeof(msg) - n); in ivshmem_recv_msg()
625 if (ret == -EINTR) { in ivshmem_recv_msg()
628 error_setg_errno(errp, -ret, "read from server failed"); in ivshmem_recv_msg()
634 *pfd = qemu_chr_fe_get_msgfd(&s->server_chr); in ivshmem_recv_msg()
654 if (fd != -1) { in ivshmem_recv_setup()
660 * ivshmem-server sends the remaining initial messages in a fixed in ivshmem_recv_setup()
668 * right here, and ivshmem-server has always complied. However, in ivshmem_recv_setup()
677 if (fd != -1 || msg < 0 || msg > IVSHMEM_MAX_PEERS) { in ivshmem_recv_setup()
681 s->vm_id = msg; in ivshmem_recv_setup()
697 } while (msg != -1); in ivshmem_recv_setup()
705 assert(s->ivshmem_bar2); in ivshmem_recv_setup()
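The ivshmem_read()/ivshmem_recv_msg()/ivshmem_recv_setup() hits implement the client side of the ivshmem-server protocol: every message is a 64-bit little-endian integer, optionally carrying a file descriptor via SCM_RIGHTS. A hedged outline of the initial handshake, paraphrased from the ivshmem spec rather than from this file:

    /*
     * Setup messages from ivshmem-server, as the client expects them:
     *   - protocol version, no fd
     *   - this client's own peer ID, no fd            -> s->vm_id
     *   - a -1 message with the shared-memory fd      -> backs BAR 2
     *   - per peer and per vector: the peer's ID with an eventfd attached
     *     (process_msg_connect()); the same ID later with no fd means the
     *     peer disconnected
     * The device accepts the post-ID messages in any order;
     * ivshmem_recv_setup() just loops until the shared-memory fd has
     * arrived (the "msg != -1" condition above), and later traffic is
     * handled asynchronously through ivshmem_read().
     */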
708 /* Select the MSI-X vectors used by device.
716 for (i = 0; i < s->vectors; i++) { in ivshmem_msix_vector_use()
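The loop above registers every one of the device's s->vectors MSI-X table entries as in use with the MSI-X core before any interrupts are delivered. A hedged sketch of the idiom (use_all_vectors is an illustrative name):

    /* Hedged sketch: claim all vectors with the MSI-X core up front. */
    static void use_all_vectors(IVShmemState *s)
    {
        PCIDevice *d = PCI_DEVICE(s);
        int i;

        for (i = 0; i < s->vectors; i++) {
            msix_vector_use(d, i);
        }
    }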
729 s->intrstatus = 0; in ivshmem_reset()
730 s->intrmask = 0; in ivshmem_reset()
738 /* allocate QEMU callback data for receiving interrupts */ in ivshmem_setup_interrupts()
739 s->msi_vectors = g_new0(MSIVector, s->vectors); in ivshmem_setup_interrupts()
742 if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1, errp)) { in ivshmem_setup_interrupts()
743 return -1; in ivshmem_setup_interrupts()
746 IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors); in ivshmem_setup_interrupts()
757 if (s->msi_vectors[vector].pdev == NULL) { in ivshmem_remove_kvm_msi_virq()
762 kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq); in ivshmem_remove_kvm_msi_virq()
764 s->msi_vectors[vector].pdev = NULL; in ivshmem_remove_kvm_msi_virq()
772 for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) { in ivshmem_enable_irqfd()
792 while (--i >= 0) { in ivshmem_enable_irqfd()
802 if (!pdev->msix_vector_use_notifier) { in ivshmem_disable_irqfd()
808 for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) { in ivshmem_disable_irqfd()
810 * MSI-X is already disabled here so msix_unset_vector_notifiers() in ivshmem_disable_irqfd()
814 if (s->msi_vectors[i].unmasked) { in ivshmem_disable_irqfd()
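ivshmem_enable_irqfd() and ivshmem_disable_irqfd() above switch between the two delivery paths: with MSI-X enabled and KVM irqfd support available, each peer eventfd is routed straight into KVM; otherwise delivery falls back to the in-QEMU handler sketched earlier. A hedged sketch of the toggle, which the real device drives from its PCI config-space write hook (toggle_irqfd is an illustrative name):

    /* Hedged sketch: flip between the KVM irqfd fast path and the QEMU
     * fallback when the guest turns MSI-X on or off. */
    static void toggle_irqfd(IVShmemState *s, PCIDevice *pdev, bool was_enabled)
    {
        bool is_enabled = msix_enabled(pdev);

        if (!kvm_msi_via_irqfd_enabled()) {
            return;                          /* no irqfd support: stay on QEMU path */
        }
        if (!was_enabled && is_enabled) {
            ivshmem_enable_irqfd(s);         /* wire eventfds into KVM */
        } else if (was_enabled && !is_enabled) {
            ivshmem_disable_irqfd(s);        /* tear down, back to QEMU handler */
        }
    }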
854 pci_conf = dev->config; in ivshmem_common_realize()
857 memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s, in ivshmem_common_realize()
858 "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE); in ivshmem_common_realize()
862 &s->ivshmem_mmio); in ivshmem_common_realize()
864 if (s->hostmem != NULL) { in ivshmem_common_realize()
867 s->ivshmem_bar2 = host_memory_backend_get_memory(s->hostmem); in ivshmem_common_realize()
868 host_memory_backend_set_mapped(s->hostmem, true); in ivshmem_common_realize()
870 Chardev *chr = qemu_chr_fe_get_driver(&s->server_chr); in ivshmem_common_realize()
874 chr->filename); in ivshmem_common_realize()
890 if (s->master == ON_OFF_AUTO_ON && s->vm_id != 0) { in ivshmem_common_realize()
896 qemu_chr_fe_set_handlers(&s->server_chr, ivshmem_can_receive, in ivshmem_common_realize()
905 if (s->master == ON_OFF_AUTO_AUTO) { in ivshmem_common_realize()
906 s->master = s->vm_id == 0 ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; in ivshmem_common_realize()
910 error_setg(&s->migration_blocker, in ivshmem_common_realize()
912 if (migrate_add_blocker(&s->migration_blocker, errp) < 0) { in ivshmem_common_realize()
917 vmstate_register_ram(s->ivshmem_bar2, DEVICE(s)); in ivshmem_common_realize()
922 s->ivshmem_bar2); in ivshmem_common_realize()
930 migrate_del_blocker(&s->migration_blocker); in ivshmem_exit()
932 if (memory_region_is_mapped(s->ivshmem_bar2)) { in ivshmem_exit()
933 if (!s->hostmem) { in ivshmem_exit()
934 void *addr = memory_region_get_ram_ptr(s->ivshmem_bar2); in ivshmem_exit()
937 if (munmap(addr, memory_region_size(s->ivshmem_bar2)) == -1) { in ivshmem_exit()
942 fd = memory_region_get_fd(s->ivshmem_bar2); in ivshmem_exit()
946 vmstate_unregister_ram(s->ivshmem_bar2, DEVICE(dev)); in ivshmem_exit()
949 if (s->hostmem) { in ivshmem_exit()
950 host_memory_backend_set_mapped(s->hostmem, false); in ivshmem_exit()
953 if (s->peers) { in ivshmem_exit()
954 for (i = 0; i < s->nb_peers; i++) { in ivshmem_exit()
957 g_free(s->peers); in ivshmem_exit()
964 g_free(s->msi_vectors); in ivshmem_exit()
973 return -EINVAL; in ivshmem_pre_load()
989 static void ivshmem_common_class_init(ObjectClass *klass, const void *data) in ivshmem_common_class_init() argument
994 k->realize = ivshmem_common_realize; in ivshmem_common_class_init()
995 k->exit = ivshmem_exit; in ivshmem_common_class_init()
996 k->config_write = ivshmem_write_config; in ivshmem_common_class_init()
997 k->vendor_id = PCI_VENDOR_ID_IVSHMEM; in ivshmem_common_class_init()
998 k->device_id = PCI_DEVICE_ID_IVSHMEM; in ivshmem_common_class_init()
999 k->class_id = PCI_CLASS_MEMORY_RAM; in ivshmem_common_class_init()
1000 k->revision = 1; in ivshmem_common_class_init()
1002 set_bit(DEVICE_CATEGORY_MISC, dc->categories); in ivshmem_common_class_init()
1003 dc->desc = "Inter-VM shared memory"; in ivshmem_common_class_init()
1042 if (!s->hostmem) { in ivshmem_plain_realize()
1045 } else if (host_memory_backend_is_mapped(s->hostmem)) { in ivshmem_plain_realize()
1047 object_get_canonical_path_component(OBJECT(s->hostmem))); in ivshmem_plain_realize()
1054 static void ivshmem_plain_class_init(ObjectClass *klass, const void *data) in ivshmem_plain_class_init() argument
1059 k->realize = ivshmem_plain_realize; in ivshmem_plain_class_init()
1061 dc->vmsd = &ivshmem_plain_vmsd; in ivshmem_plain_class_init()
1098 s->features |= (1 << IVSHMEM_MSI); in ivshmem_doorbell_init()
1105 if (!qemu_chr_fe_backend_connected(&s->server_chr)) { in ivshmem_doorbell_realize()
1113 static void ivshmem_doorbell_class_init(ObjectClass *klass, const void *data) in ivshmem_doorbell_class_init() argument
1118 k->realize = ivshmem_doorbell_realize; in ivshmem_doorbell_class_init()
1120 dc->vmsd = &ivshmem_doorbell_vmsd; in ivshmem_doorbell_class_init()