/*
 * graphics passthrough
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/xen/xen_pt.h"
#include "hw/xen/xen_igd.h"
#include "xen-host-pci-device.h"

static unsigned long igd_guest_opregion;
static unsigned long igd_host_opregion;

#define XEN_PCI_INTEL_OPREGION_MASK 0xfff

typedef struct VGARegion {
    int type;           /* Memory or port I/O */
    uint64_t guest_base_addr;
    uint64_t machine_base_addr;
    uint64_t size;      /* size of the region */
    int rc;
} VGARegion;

#define IORESOURCE_IO           0x00000100
#define IORESOURCE_MEM          0x00000200

static struct VGARegion vga_args[] = {
    {
        .type = IORESOURCE_IO,
        .guest_base_addr = 0x3B0,
        .machine_base_addr = 0x3B0,
        .size = 0xC,
        .rc = -1,
    },
    {
        .type = IORESOURCE_IO,
        .guest_base_addr = 0x3C0,
        .machine_base_addr = 0x3C0,
        .size = 0x20,
        .rc = -1,
    },
    {
        .type = IORESOURCE_MEM,
        .guest_base_addr = 0xa0000 >> XC_PAGE_SHIFT,
        .machine_base_addr = 0xa0000 >> XC_PAGE_SHIFT,
        .size = 0x20,
        .rc = -1,
    },
};

/*
 * register VGA resources for the domain with assigned gfx
 */
int xen_pt_register_vga_regions(XenHostPCIDevice *dev)
{
    int i = 0;

    if (!is_igd_vga_passthrough(dev)) {
        return 0;
    }

    for (i = 0; i < ARRAY_SIZE(vga_args); i++) {
        if (vga_args[i].type == IORESOURCE_IO) {
            vga_args[i].rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
                                    vga_args[i].guest_base_addr,
                                    vga_args[i].machine_base_addr,
                                    vga_args[i].size, DPCI_ADD_MAPPING);
        } else {
            vga_args[i].rc = xc_domain_memory_mapping(xen_xc, xen_domid,
                                    vga_args[i].guest_base_addr,
                                    vga_args[i].machine_base_addr,
                                    vga_args[i].size, DPCI_ADD_MAPPING);
        }

        if (vga_args[i].rc) {
            XEN_PT_ERR(NULL, "VGA %s mapping failed! (rc: %i)\n",
                       vga_args[i].type == IORESOURCE_IO ? "ioport" : "memory",
                       vga_args[i].rc);
            return vga_args[i].rc;
        }
    }

    return 0;
}

/*
 * unregister VGA resources for the domain with assigned gfx
 */
int xen_pt_unregister_vga_regions(XenHostPCIDevice *dev)
{
    int i = 0;
    int ret = 0;

    if (!is_igd_vga_passthrough(dev)) {
        return 0;
    }

    for (i = 0; i < ARRAY_SIZE(vga_args); i++) {
        if (vga_args[i].type == IORESOURCE_IO) {
            vga_args[i].rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
                                    vga_args[i].guest_base_addr,
                                    vga_args[i].machine_base_addr,
                                    vga_args[i].size, DPCI_REMOVE_MAPPING);
        } else {
            vga_args[i].rc = xc_domain_memory_mapping(xen_xc, xen_domid,
                                    vga_args[i].guest_base_addr,
                                    vga_args[i].machine_base_addr,
                                    vga_args[i].size, DPCI_REMOVE_MAPPING);
        }

        if (vga_args[i].rc) {
            XEN_PT_ERR(NULL, "VGA %s unmapping failed! (rc: %i)\n",
                       vga_args[i].type == IORESOURCE_IO ? "ioport" : "memory",
                       vga_args[i].rc);
            return vga_args[i].rc;
        }
    }

    if (igd_guest_opregion) {
        ret = xc_domain_memory_mapping(xen_xc, xen_domid,
                (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT),
                (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
                3,
                DPCI_REMOVE_MAPPING);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

static void *get_vgabios(XenPCIPassthroughState *s, int *size,
                         XenHostPCIDevice *dev)
{
    return pci_assign_dev_load_option_rom(&s->dev, size,
                                          dev->domain, dev->bus,
                                          dev->dev, dev->func);
}

/* Refer to Seabios. */
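/*
 * Layout notes (standard PCI expansion ROM format, summarised here for
 * context): the image starts with the 0xAA55 signature (bytes 0x55, 0xAA),
 * rom_header.size gives the image length in 512-byte units (hence the
 * "rom->size * 512" below), and rom_header.pcioffset points to the PCI data
 * structure ("PCIR") whose vendor/device fields identify the device the ROM
 * was built for. The byte sum of the whole image must be zero modulo 256,
 * so the last byte has to be re-adjusted whenever the device ID is patched.
 */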
struct rom_header {
    uint16_t signature;
    uint8_t size;
    uint8_t initVector[4];
    uint8_t reserved[17];
    uint16_t pcioffset;
    uint16_t pnpoffset;
} __attribute__((packed));

struct pci_data {
    uint32_t signature;
    uint16_t vendor;
    uint16_t device;
    uint16_t vitaldata;
    uint16_t dlen;
    uint8_t drevision;
    uint8_t class_lo;
    uint16_t class_hi;
    uint16_t ilen;
    uint16_t irevision;
    uint8_t type;
    uint8_t indicator;
    uint16_t reserved;
} __attribute__((packed));

void xen_pt_setup_vga(XenPCIPassthroughState *s, XenHostPCIDevice *dev,
                      Error **errp)
{
    unsigned char *bios = NULL;
    struct rom_header *rom;
    int bios_size;
    uint8_t *c = NULL;
    uint8_t checksum = 0;
    uint32_t len = 0;
    struct pci_data *pd = NULL;

    if (!is_igd_vga_passthrough(dev)) {
        error_setg(errp, "Need to enable igd-passthrough");
        return;
    }

    bios = get_vgabios(s, &bios_size, dev);
    if (!bios) {
        error_setg(errp, "VGA: Can't get VBIOS");
        return;
    }

    if (bios_size < sizeof(struct rom_header)) {
        error_setg(errp, "VGA: VBIOS image corrupt (too small)");
        return;
    }

    /* Currently we treat this image as the primary one. */
    rom = (struct rom_header *)bios;

    if (rom->pcioffset + sizeof(struct pci_data) > bios_size) {
        error_setg(errp, "VGA: VBIOS image corrupt (bad pcioffset field)");
        return;
    }

    pd = (void *)(bios + rom->pcioffset);

    /* We may need to fix up the Device Identification. */
    if (pd->device != s->real_device.device_id) {
        pd->device = s->real_device.device_id;

        len = rom->size * 512;
        if (len > bios_size) {
            error_setg(errp, "VGA: VBIOS image corrupt (bad size field)");
            return;
        }

        /* Then adjust the bios checksum */
        for (c = bios; c < bios + len; c++) {
            checksum += *c;
        }
        if (checksum) {
            bios[len - 1] -= checksum;
            XEN_PT_LOG(&s->dev, "VGA BIOS checksum adjusted by 0x%x!\n",
                       checksum);
        }
    }

    /* Place the VBIOS at 0xc0000, the fixed primary address for the legacy BIOS. */
    cpu_physical_memory_write(0xc0000, bios, bios_size);
}

uint32_t igd_read_opregion(XenPCIPassthroughState *s)
{
    uint32_t val = 0;

    if (!igd_guest_opregion) {
        return val;
    }

    val = igd_guest_opregion;

    XEN_PT_LOG(&s->dev, "Read opregion val=%x\n", val);
    return val;
}

#define XEN_PCI_INTEL_OPREGION_PAGES 0x3
#define XEN_PCI_INTEL_OPREGION_ENABLE_ACCESSED 0x1
void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val)
{
    int ret;

    if (igd_guest_opregion) {
        XEN_PT_LOG(&s->dev,
                   "opregion register has already been set, ignoring 0x%x\n",
                   val);
        return;
    }

    /* We just work with little-endian here. */
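    /*
     * A sketch of what follows, assuming XEN_PCI_INTEL_OPREGION is the
     * IGD OpRegion address (ASLS) register in config space:
     *  1. read the host's OpRegion address from the real device,
     *  2. form the guest address from the guest-written value (val) with its
     *     low 12 bits replaced by the host's sub-page offset, so the OpRegion
     *     keeps the same offset within its first page,
     *  3. grant the domain access to the host OpRegion pages and map
     *     XEN_PCI_INTEL_OPREGION_PAGES of them at the guest address.
     *
     * Illustrative values only: if the host register holds 0xcafe5018 and the
     * guest writes val = 0x7ffde000, igd_guest_opregion becomes 0x7ffde018
     * and guest pfns 0x7ffde-0x7ffe0 get mapped to host pfns 0xcafe5-0xcafe7.
     */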
    xen_host_pci_get_block(&s->real_device, XEN_PCI_INTEL_OPREGION,
                           (uint8_t *)&igd_host_opregion, 4);
    igd_guest_opregion = (unsigned long)(val & ~XEN_PCI_INTEL_OPREGION_MASK)
                         | (igd_host_opregion & XEN_PCI_INTEL_OPREGION_MASK);

    ret = xc_domain_iomem_permission(xen_xc, xen_domid,
            (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
            XEN_PCI_INTEL_OPREGION_PAGES,
            XEN_PCI_INTEL_OPREGION_ENABLE_ACCESSED);

    if (ret) {
        XEN_PT_ERR(&s->dev, "[%d]:Can't enable access to IGD host opregion:"
                   " 0x%lx.\n", ret,
                   (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT));
        igd_guest_opregion = 0;
        return;
    }

    ret = xc_domain_memory_mapping(xen_xc, xen_domid,
            (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT),
            (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
            XEN_PCI_INTEL_OPREGION_PAGES,
            DPCI_ADD_MAPPING);

    if (ret) {
        XEN_PT_ERR(&s->dev, "[%d]:Can't map IGD host opregion:0x%lx to"
                   " guest opregion:0x%lx.\n", ret,
                   (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
                   (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT));
        igd_guest_opregion = 0;
        return;
    }

    XEN_PT_LOG(&s->dev, "Map OpRegion: 0x%lx -> 0x%lx\n",
               (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
               (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT));
}

typedef struct {
    uint16_t gpu_device_id;
    uint16_t pch_device_id;
    uint8_t pch_revision_id;
} IGDDeviceIDInfo;

/*
 * In the real world, different GPUs should be paired with different PCHs,
 * but in practice the different PCH device IDs likely just map to different
 * PCH SKUs; the same goes for the GPU. For the PCH, the various SKUs share
 * the same silicon design and implementation, with individual features
 * simply fused on or off, so the software interfaces should be consistent
 * across all SKUs of a given family (e.g. LPT), even though not every
 * feature is available on every SKU.
 *
 * Most of these PCH feature differences probably don't matter to the gfx
 * driver; any difference in display port connections obviously would, but
 * even so passthrough should be fine with any PCH.
 *
 * So we currently use a single PCH device ID per family: 0x8c4e to cover
 * all HSW (Haswell) scenarios, and 0x9cc3 for BDW (Broadwell).
 */
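/*
 * Example, using values from the table below: for a Haswell GT2 desktop GPU
 * (device ID 0x0412), xen_igd_passthrough_isa_bridge_create() exposes a fake
 * ISA bridge at 00:1f.0 with device ID 0x8c4e and revision 0x04, which the
 * guest graphics driver can then use to identify the PCH.
 */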
static const IGDDeviceIDInfo igd_combo_id_infos[] = {
    /* HSW Classic */
    {0x0402, 0x8c4e, 0x04}, /* HSWGT1D, HSWD_w7 */
    {0x0406, 0x8c4e, 0x04}, /* HSWGT1M, HSWM_w7 */
    {0x0412, 0x8c4e, 0x04}, /* HSWGT2D, HSWD_w7 */
    {0x0416, 0x8c4e, 0x04}, /* HSWGT2M, HSWM_w7 */
    {0x041E, 0x8c4e, 0x04}, /* HSWGT15D, HSWD_w7 */
    /* HSW ULT */
    {0x0A06, 0x8c4e, 0x04}, /* HSWGT1UT, HSWM_w7 */
    {0x0A16, 0x8c4e, 0x04}, /* HSWGT2UT, HSWM_w7 */
    {0x0A26, 0x8c4e, 0x06}, /* HSWGT3UT, HSWM_w7 */
    {0x0A2E, 0x8c4e, 0x04}, /* HSWGT3UT28W, HSWM_w7 */
    {0x0A1E, 0x8c4e, 0x04}, /* HSWGT2UX, HSWM_w7 */
    {0x0A0E, 0x8c4e, 0x04}, /* HSWGT1ULX, HSWM_w7 */
    /* HSW CRW */
    {0x0D26, 0x8c4e, 0x04}, /* HSWGT3CW, HSWM_w7 */
    {0x0D22, 0x8c4e, 0x04}, /* HSWGT3CWDT, HSWD_w7 */
    /* HSW Server */
    {0x041A, 0x8c4e, 0x04}, /* HSWSVGT2, HSWD_w7 */
    /* HSW SRVR */
    {0x040A, 0x8c4e, 0x04}, /* HSWSVGT1, HSWD_w7 */
    /* BDW */
    {0x1606, 0x9cc3, 0x03}, /* BDWULTGT1, BDWM_w7 */
    {0x1616, 0x9cc3, 0x03}, /* BDWULTGT2, BDWM_w7 */
    {0x1626, 0x9cc3, 0x03}, /* BDWULTGT3, BDWM_w7 */
    {0x160E, 0x9cc3, 0x03}, /* BDWULXGT1, BDWM_w7 */
    {0x161E, 0x9cc3, 0x03}, /* BDWULXGT2, BDWM_w7 */
    {0x1602, 0x9cc3, 0x03}, /* BDWHALOGT1, BDWM_w7 */
    {0x1612, 0x9cc3, 0x03}, /* BDWHALOGT2, BDWM_w7 */
    {0x1622, 0x9cc3, 0x03}, /* BDWHALOGT3, BDWM_w7 */
    {0x162B, 0x9cc3, 0x03}, /* BDWHALO28W, BDWM_w7 */
    {0x162A, 0x9cc3, 0x03}, /* BDWGT3WRKS, BDWM_w7 */
    {0x162D, 0x9cc3, 0x03}, /* BDWGT3SRVR, BDWM_w7 */
};

static void isa_bridge_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    dc->desc = "ISA bridge faked to support IGD PT";
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->class_id = PCI_CLASS_BRIDGE_ISA;
}

static const TypeInfo isa_bridge_info = {
    .name = "igd-passthrough-isa-bridge",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIDevice),
    .class_init = isa_bridge_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pt_graphics_register_types(void)
{
    type_register_static(&isa_bridge_info);
}
type_init(pt_graphics_register_types)

void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
                                           XenHostPCIDevice *dev)
{
    PCIBus *bus = pci_get_bus(&s->dev);
    struct PCIDevice *bridge_dev;
    int i, num;
    const uint16_t gpu_dev_id = dev->device_id;
    uint16_t pch_dev_id = 0xffff;
    uint8_t pch_rev_id = 0;

    num = ARRAY_SIZE(igd_combo_id_infos);
    for (i = 0; i < num; i++) {
        if (gpu_dev_id == igd_combo_id_infos[i].gpu_device_id) {
            pch_dev_id = igd_combo_id_infos[i].pch_device_id;
            pch_rev_id = igd_combo_id_infos[i].pch_revision_id;
        }
    }

    if (pch_dev_id == 0xffff) {
        return;
    }

    /* Currently IGD drivers always need to access the PCH at 1f.0. */
    bridge_dev = pci_create_simple(bus, PCI_DEVFN(0x1f, 0),
                                   "igd-passthrough-isa-bridge");

    /*
     * Note that the vendor id is always PCI_VENDOR_ID_INTEL.
     */
    if (!bridge_dev) {
        fprintf(stderr, "Creating igd-passthrough-isa-bridge failed!\n");
        return;
    }
    pci_config_set_device_id(bridge_dev->config, pch_dev_id);
    pci_config_set_revision(bridge_dev->config, pch_rev_id);
}