/*
 * Copyright © 2018, 2021 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"

#include "hw/remote/proxy.h"
#include "hw/pci/pci.h"
#include "qapi/error.h"
#include "io/channel-util.h"
#include "hw/qdev-properties.h"
#include "monitor/monitor.h"
#include "migration/blocker.h"
#include "qemu/sockets.h"
#include "hw/remote/mpqemu-link.h"
#include "qemu/error-report.h"
#include "hw/remote/proxy-memory-listener.h"
#include "qom/object.h"

static void pci_proxy_dev_realize(PCIDevice *device, Error **errp)
{
    ERRP_GUARD();
    PCIProxyDev *dev = PCI_PROXY_DEV(device);
    int fd;

    if (!dev->fd) {
        error_setg(errp, "fd parameter not specified for %s",
                   DEVICE(device)->id);
        return;
    }

    fd = monitor_fd_param(monitor_cur(), dev->fd, errp);
    if (fd == -1) {
        error_prepend(errp, "proxy: unable to parse fd %s: ", dev->fd);
        return;
    }

    if (!fd_is_socket(fd)) {
        error_setg(errp, "proxy: fd %d is not a socket", fd);
        close(fd);
        return;
    }

    dev->ioc = qio_channel_new_fd(fd, errp);

    error_setg(&dev->migration_blocker, "%s does not support migration",
               TYPE_PCI_PROXY_DEV);
    migrate_add_blocker(dev->migration_blocker, errp);

    qemu_mutex_init(&dev->io_mutex);
    qio_channel_set_blocking(dev->ioc, true, NULL);

    proxy_memory_listener_configure(&dev->proxy_listener, dev->ioc);
}

static void pci_proxy_dev_exit(PCIDevice *pdev)
{
    PCIProxyDev *dev = PCI_PROXY_DEV(pdev);

    if (dev->ioc) {
        qio_channel_close(dev->ioc, NULL);
    }

    migrate_del_blocker(dev->migration_blocker);

    error_free(dev->migration_blocker);

    proxy_memory_listener_deconfigure(&dev->proxy_listener);
}

/*
 * Forward a PCI config space access to the remote device process and,
 * for reads, return the value received in @val.
 */
static void config_op_send(PCIProxyDev *pdev, uint32_t addr, uint32_t *val,
                           int len, unsigned int op)
{
    MPQemuMsg msg = { 0 };
    uint64_t ret = -EINVAL;
    Error *local_err = NULL;

    msg.cmd = op;
    msg.data.pci_conf_data.addr = addr;
    msg.data.pci_conf_data.val = (op == MPQEMU_CMD_PCI_CFGWRITE) ? *val : 0;
    msg.data.pci_conf_data.len = len;
    msg.size = sizeof(PciConfDataMsg);

    ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }

    if (ret == UINT64_MAX) {
        error_report("Failed to perform PCI config %s operation",
                     (op == MPQEMU_CMD_PCI_CFGREAD) ? "READ" : "WRITE");
    }

    if (op == MPQEMU_CMD_PCI_CFGREAD) {
        *val = (uint32_t)ret;
    }
}

static uint32_t pci_proxy_read_config(PCIDevice *d, uint32_t addr, int len)
{
    uint32_t val;

    config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGREAD);

    return val;
}

static void pci_proxy_write_config(PCIDevice *d, uint32_t addr, uint32_t val,
                                   int len)
{
    /*
     * Some of the functions access the copy of the remote device's PCI
     * config space which is cached in the proxy device. Therefore, keep
     * it up to date.
     */
    pci_default_write_config(d, addr, val, len);

    config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGWRITE);
}

static Property proxy_properties[] = {
    DEFINE_PROP_STRING("fd", PCIProxyDev, fd),
    DEFINE_PROP_END_OF_LIST(),
};

static void pci_proxy_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = pci_proxy_dev_realize;
    k->exit = pci_proxy_dev_exit;
    k->config_read = pci_proxy_read_config;
    k->config_write = pci_proxy_write_config;

    device_class_set_props(dc, proxy_properties);
}

static const TypeInfo pci_proxy_dev_type_info = {
    .name = TYPE_PCI_PROXY_DEV,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIProxyDev),
    .class_init = pci_proxy_dev_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pci_proxy_dev_register_types(void)
{
    type_register_static(&pci_proxy_dev_type_info);
}

type_init(pci_proxy_dev_register_types)

static void send_bar_access_msg(PCIProxyDev *pdev, MemoryRegion *mr,
                                bool write, hwaddr addr, uint64_t *val,
                                unsigned size, bool memory)
{
    MPQemuMsg msg = { 0 };
    long ret = -EINVAL;
    Error *local_err = NULL;

    msg.size = sizeof(BarAccessMsg);
    msg.data.bar_access.addr = mr->addr + addr;
    msg.data.bar_access.size = size;
    msg.data.bar_access.memory = memory;

    if (write) {
        msg.cmd = MPQEMU_CMD_BAR_WRITE;
        msg.data.bar_access.val = *val;
    } else {
        msg.cmd = MPQEMU_CMD_BAR_READ;
    }

    ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }

    if (!write) {
        *val = ret;
    }
}

static void proxy_bar_write(void *opaque, hwaddr addr, uint64_t val,
                            unsigned size)
{
    ProxyMemoryRegion *pmr = opaque;

    send_bar_access_msg(pmr->dev, &pmr->mr, true, addr, &val, size,
                        pmr->memory);
}

static uint64_t proxy_bar_read(void *opaque, hwaddr addr, unsigned size)
{
    ProxyMemoryRegion *pmr = opaque;
    uint64_t val;

    send_bar_access_msg(pmr->dev, &pmr->mr, false, addr, &val, size,
                        pmr->memory);

    return val;
}

const MemoryRegionOps proxy_mr_ops = {
    .read = proxy_bar_read,
    .write = proxy_bar_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};