/*
 * Copyright © 2018, 2021 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"

#include "hw/remote/proxy.h"
#include "hw/pci/pci.h"
#include "qapi/error.h"
#include "io/channel-util.h"
#include "hw/qdev-properties.h"
#include "monitor/monitor.h"
#include "migration/blocker.h"
#include "qemu/sockets.h"
#include "hw/remote/mpqemu-link.h"
#include "qemu/error-report.h"

static void pci_proxy_dev_realize(PCIDevice *device, Error **errp)
{
    ERRP_GUARD();
    PCIProxyDev *dev = PCI_PROXY_DEV(device);
    int fd;

    if (!dev->fd) {
        error_setg(errp, "fd parameter not specified for %s",
                   DEVICE(device)->id);
        return;
    }

    fd = monitor_fd_param(monitor_cur(), dev->fd, errp);
    if (fd == -1) {
        error_prepend(errp, "proxy: unable to parse fd %s: ", dev->fd);
        return;
    }

    if (!fd_is_socket(fd)) {
        error_setg(errp, "proxy: fd %d is not a socket", fd);
        close(fd);
        return;
    }

    dev->ioc = qio_channel_new_fd(fd, errp);
    if (!dev->ioc) {
        close(fd);
        return;
    }

    error_setg(&dev->migration_blocker, "%s does not support migration",
               TYPE_PCI_PROXY_DEV);
    migrate_add_blocker(dev->migration_blocker, errp);

    qemu_mutex_init(&dev->io_mutex);
    qio_channel_set_blocking(dev->ioc, true, NULL);
}

static void pci_proxy_dev_exit(PCIDevice *pdev)
{
    PCIProxyDev *dev = PCI_PROXY_DEV(pdev);

    if (dev->ioc) {
        qio_channel_close(dev->ioc, NULL);
    }

    migrate_del_blocker(dev->migration_blocker);

    error_free(dev->migration_blocker);
}

/*
 * Forward a PCI config space access to the remote device process and wait
 * for its reply.  For reads, *val is updated with the value returned by the
 * remote device.
 */
static void config_op_send(PCIProxyDev *pdev, uint32_t addr, uint32_t *val,
                           int len, unsigned int op)
{
    MPQemuMsg msg = { 0 };
    uint64_t ret = -EINVAL;
    Error *local_err = NULL;

    msg.cmd = op;
    msg.data.pci_conf_data.addr = addr;
    msg.data.pci_conf_data.val = (op == MPQEMU_CMD_PCI_CFGWRITE) ? *val : 0;
    msg.data.pci_conf_data.len = len;
    msg.size = sizeof(PciConfDataMsg);

    ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }

    if (ret == UINT64_MAX) {
        error_report("Failed to perform PCI config %s operation",
                     (op == MPQEMU_CMD_PCI_CFGREAD) ? "READ" : "WRITE");
    }

    if (op == MPQEMU_CMD_PCI_CFGREAD) {
        *val = (uint32_t)ret;
    }
}

static uint32_t pci_proxy_read_config(PCIDevice *d, uint32_t addr, int len)
{
    uint32_t val;

    config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGREAD);

    return val;
}

static void pci_proxy_write_config(PCIDevice *d, uint32_t addr, uint32_t val,
                                   int len)
{
    /*
     * Some of the functions access the copy of the remote device's PCI
     * config space which is cached in the proxy device. Therefore, keep it
     * up to date.
     */
    pci_default_write_config(d, addr, val, len);

    config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGWRITE);
}

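/*
 * The "fd" property below names the socket that connects this proxy device
 * to the remote device process.  A usage sketch (following
 * docs/system/multi-process.rst; the id and fd values are only
 * illustrative):
 *
 *   -device x-pci-proxy-dev,id=lsi0,fd=4
 *
 * Since realize parses the property with monitor_fd_param(), the value may
 * also be the name of a file descriptor previously passed to QEMU with the
 * 'getfd' monitor command.
 */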
static Property proxy_properties[] = {
    DEFINE_PROP_STRING("fd", PCIProxyDev, fd),
    DEFINE_PROP_END_OF_LIST(),
};

static void pci_proxy_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = pci_proxy_dev_realize;
    k->exit = pci_proxy_dev_exit;
    k->config_read = pci_proxy_read_config;
    k->config_write = pci_proxy_write_config;

    device_class_set_props(dc, proxy_properties);
}

static const TypeInfo pci_proxy_dev_type_info = {
    .name = TYPE_PCI_PROXY_DEV,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIProxyDev),
    .class_init = pci_proxy_dev_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pci_proxy_dev_register_types(void)
{
    type_register_static(&pci_proxy_dev_type_info);
}

type_init(pci_proxy_dev_register_types)

/*
 * Forward a BAR (MMIO or I/O port) access to the remote device process and
 * wait for its reply.  For reads, *val is updated with the value returned by
 * the remote device.
 */
static void send_bar_access_msg(PCIProxyDev *pdev, MemoryRegion *mr,
                                bool write, hwaddr addr, uint64_t *val,
                                unsigned size, bool memory)
{
    MPQemuMsg msg = { 0 };
    long ret = -EINVAL;
    Error *local_err = NULL;

    msg.size = sizeof(BarAccessMsg);
    msg.data.bar_access.addr = mr->addr + addr;
    msg.data.bar_access.size = size;
    msg.data.bar_access.memory = memory;

    if (write) {
        msg.cmd = MPQEMU_CMD_BAR_WRITE;
        msg.data.bar_access.val = *val;
    } else {
        msg.cmd = MPQEMU_CMD_BAR_READ;
    }

    ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }

    if (!write) {
        *val = ret;
    }
}

static void proxy_bar_write(void *opaque, hwaddr addr, uint64_t val,
                            unsigned size)
{
    ProxyMemoryRegion *pmr = opaque;

    send_bar_access_msg(pmr->dev, &pmr->mr, true, addr, &val, size,
                        pmr->memory);
}

static uint64_t proxy_bar_read(void *opaque, hwaddr addr, unsigned size)
{
    ProxyMemoryRegion *pmr = opaque;
    uint64_t val;

    send_bar_access_msg(pmr->dev, &pmr->mr, false, addr, &val, size,
                        pmr->memory);

    return val;
}

/* Ops for the proxy device's BAR regions: every access goes to the remote */
const MemoryRegionOps proxy_mr_ops = {
    .read = proxy_bar_read,
    .write = proxy_bar_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};