/*
 * Copyright (c) 2007, Neocleus Corporation.
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Alex Novik <alex@neocleus.com>
 * Allen Kay <allen.m.kay@intel.com>
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to an HVM guest
 */

/*
 * Interrupt Disable policy:
 *
 * INTx interrupt:
 *   Initialize(register_real_device)
 *     Map INTx(xc_physdev_map_pirq):
 *       <fail>
 *         - Set real Interrupt Disable bit to '1'.
 *         - Set machine_irq and assigned_device->machine_irq to '0'.
 *         * Don't bind INTx.
 *
 *     Bind INTx(xc_domain_bind_pt_pci_irq):
 *       <fail>
 *         - Set real Interrupt Disable bit to '1'.
 *         - Unmap INTx.
 *         - Decrement xen_pt_mapped_machine_irq[machine_irq]
 *         - Set assigned_device->machine_irq to '0'.
 *
 *   Write to Interrupt Disable bit by guest software(xen_pt_cmd_reg_write)
 *     Write '0'
 *       - Set real bit to '0' if assigned_device->machine_irq isn't '0'.
 *
 *     Write '1'
 *       - Set real bit to '1'.
 *
 * MSI interrupt:
 *   Initialize MSI register(xen_pt_msi_setup, xen_pt_msi_update)
 *     Bind MSI(xc_domain_update_msi_irq)
 *       <fail>
 *         - Unmap MSI.
 *         - Set dev->msi->pirq to '-1'.
 *
 * MSI-X interrupt:
 *   Initialize MSI-X register(xen_pt_msix_update_one)
 *     Bind MSI-X(xc_domain_update_msi_irq)
 *       <fail>
 *         - Unmap MSI-X.
 *         - Set entry->pirq to '-1'.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>

#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/xen/xen.h"
#include "hw/xen/xen-legacy-backend.h"
#include "xen_pt.h"
#include "qemu/range.h"

static bool has_igd_gfx_passthru;

bool xen_igd_gfx_pt_enabled(void)
{
    return has_igd_gfx_passthru;
}

void xen_igd_gfx_pt_set(bool value, Error **errp)
{
    has_igd_gfx_passthru = value;
}

#define XEN_PT_NR_IRQS (256)
static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};

void xen_pt_log(const PCIDevice *d, const char *f, ...)
{
    va_list ap;

    va_start(ap, f);
    if (d) {
        fprintf(stderr, "[%02x:%02x.%d] ", pci_dev_bus_num(d),
                PCI_SLOT(d->devfn), PCI_FUNC(d->devfn));
    }
    vfprintf(stderr, f, ap);
    va_end(ap);
}

/* Config Space */

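/*
 * Sanity-check a guest config space access: the offset must stay within
 * the 256-byte conventional config space, the length must be 1, 2 or 4
 * bytes, and the access must be naturally aligned.  Returns 0 if the
 * access is acceptable, -1 otherwise.
 */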
static int xen_pt_pci_config_access_check(PCIDevice *d, uint32_t addr, int len)
{
    /* check offset range */
    if (addr > 0xFF) {
        XEN_PT_ERR(d, "Failed to access register with offset exceeding 0xFF. "
                   "(addr: 0x%02x, len: %d)\n", addr, len);
        return -1;
    }

    /* check read size */
    if ((len != 1) && (len != 2) && (len != 4)) {
        XEN_PT_ERR(d, "Failed to access register with invalid access length. "
                   "(addr: 0x%02x, len: %d)\n", addr, len);
        return -1;
    }

    /* check offset alignment */
    if (addr & (len - 1)) {
        XEN_PT_ERR(d, "Failed to access register with invalid access size "
                   "alignment. (addr: 0x%02x, len: %d)\n", addr, len);
        return -1;
    }

    return 0;
}

int xen_pt_bar_offset_to_index(uint32_t offset)
{
    int index = 0;

    /* check Exp ROM BAR */
    if (offset == PCI_ROM_ADDRESS) {
        return PCI_ROM_SLOT;
    }

    /* calculate BAR index */
    index = (offset - PCI_BASE_ADDRESS_0) >> 2;
    if (index >= PCI_NUM_REGIONS) {
        return -1;
    }

    return index;
}

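/*
 * Guest config space read handler.  The value is first read from the real
 * device; then, for every emulated register that overlaps the requested
 * range, the per-register read handler is applied so the guest sees the
 * emulated fields instead of the raw hardware bits.  Bytes that belong to
 * no emulated register are passed through unchanged, and 0-hardwired
 * register groups always read as zero.
 */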
static uint32_t xen_pt_pci_read_config(PCIDevice *d, uint32_t addr, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    uint32_t val = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    int rc = 0;
    int emul_len = 0;
    uint32_t find_addr = addr;

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        goto exit;
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* no need to emulate, just return 0 */
            val = 0;
            goto exit;
        }
    }

    /* read I/O device register value */
    rc = xen_host_pci_get_block(&s->real_device, addr, (uint8_t *)&val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&val, 0xff, len);
    }

    /* just return the I/O device register value for
     * passthrough type register group */
    if (reg_grp_entry == NULL) {
        goto exit;
    }

    /* adjust the read value to appropriate CFC-CFF window */
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            XenPTRegInfo *reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.read) {
                    rc = reg->u.b.read(s, reg_entry, ptr_val, valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.read) {
                    rc = reg->u.w.read(s, reg_entry,
                                       (uint16_t *)ptr_val, valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.read) {
                    rc = reg->u.dw.read(s, reg_entry,
                                        (uint32_t *)ptr_val, valid_mask);
                }
                break;
            }

            if (rc < 0) {
                xen_shutdown_fatal_error("Internal error: Invalid read "
                                         "emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return 0;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do with passthrough type register,
             * continue to find next byte */
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before returning them to pci bus emulator */
    val >>= ((addr & 3) << 3);

exit:
    XEN_PT_LOG_CONFIG(d, addr, val, len);
    return val;
}

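/*
 * Guest config space write handler.  The current hardware value is read
 * back first, then every emulated register that overlaps the access gets
 * its per-register write handler applied to compute the value that may be
 * forwarded to hardware.  wb_mask tracks which bytes are still allowed to
 * reach the real device: fully write-protected registers and (outside
 * permissive mode) unknown fields are masked out, and the remaining bytes
 * are written back with xen_host_pci_set_block() at the end.
 */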
static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
                                    uint32_t val, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int index = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    int rc = 0;
    uint32_t read_val = 0, wb_mask;
    int emul_len = 0;
    XenPTReg *reg_entry = NULL;
    uint32_t find_addr = addr;
    XenPTRegInfo *reg = NULL;
    bool wp_flag = false;

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        return;
    }

    XEN_PT_LOG_CONFIG(d, addr, val, len);

    /* check unused BAR register */
    index = xen_pt_bar_offset_to_index(addr);
    if ((index >= 0) && (val != 0)) {
        uint32_t chk = val;

        if (index == PCI_ROM_SLOT)
            chk |= (uint32_t)~PCI_ROM_ADDRESS_MASK;

        if ((chk != XEN_PT_BAR_ALLF) &&
            (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) {
            XEN_PT_WARN(d, "Guest attempt to set address to unused "
                        "Base Address Register. (addr: 0x%02x, len: %d)\n",
                        addr, len);
        }
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* ignore silently */
            XEN_PT_WARN(d, "Access to 0-Hardwired register. "
                        "(addr: 0x%02x, len: %d)\n", addr, len);
            return;
        }
    }

    rc = xen_host_pci_get_block(&s->real_device, addr,
                                (uint8_t *)&read_val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&read_val, 0xff, len);
        wb_mask = 0;
    } else {
        wb_mask = 0xFFFFFFFF >> ((4 - len) << 3);
    }

    /* pass directly to the real device for passthrough type register group */
    if (reg_grp_entry == NULL) {
        if (!s->permissive) {
            wb_mask = 0;
            wp_flag = true;
        }
        goto out;
    }

    memory_region_transaction_begin();
    pci_default_write_config(d, addr, val, len);

    /* adjust the read and write value to appropriate CFC-CFF window */
    read_val <<= (addr & 3) << 3;
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;
            uint32_t wp_mask = reg->emu_mask | reg->ro_mask;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);
            if (!s->permissive) {
                wp_mask |= reg->res_mask;
            }
            if (wp_mask == (0xFFFFFFFF >> ((4 - reg->size) << 3))) {
                wb_mask &= ~((wp_mask >> ((find_addr - real_offset) << 3))
                             << ((len - emul_len) << 3));
            }

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.write) {
                    rc = reg->u.b.write(s, reg_entry, ptr_val,
                                        read_val >> ((real_offset & 3) << 3),
                                        valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.write) {
                    rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val,
                                        (read_val >> ((real_offset & 3) << 3)),
                                        valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.write) {
                    rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val,
                                         (read_val >> ((real_offset & 3) << 3)),
                                         valid_mask);
                }
                break;
            }

            if (rc < 0) {
                xen_shutdown_fatal_error("Internal error: Invalid write"
                                         " emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do with passthrough type register,
             * continue to find next byte */
            if (!s->permissive) {
                wb_mask &= ~(0xff << ((len - emul_len) << 3));
                /* Unused BARs will make it here, but we don't want to issue
                 * warnings for writes to them (bogus writes get dealt with
                 * above).
                 */
                if (index < 0) {
                    wp_flag = true;
                }
            }
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before passing them to xen_host_pci_set_block. */
    val >>= (addr & 3) << 3;

    memory_region_transaction_commit();

out:
    if (wp_flag && !s->permissive_warned) {
        s->permissive_warned = true;
        xen_pt_log(d, "Write-back to unknown field 0x%02x (partially) inhibited (0x%0*x)\n",
                   addr, len * 2, wb_mask);
        xen_pt_log(d, "If the device doesn't work, try enabling permissive mode\n");
        xen_pt_log(d, "(unsafe) and if it helps report the problem to xen-devel\n");
    }
    for (index = 0; wb_mask; index += len) {
        /* unknown regs are passed through */
        while (!(wb_mask & 0xff)) {
            index++;
            wb_mask >>= 8;
        }
        len = 0;
        do {
            len++;
            wb_mask >>= 8;
        } while (wb_mask & 0xff);
        rc = xen_host_pci_set_block(&s->real_device, addr + index,
                                    (uint8_t *)&val + index, len);

        if (rc < 0) {
            XEN_PT_ERR(d, "xen_host_pci_set_block failed. return value: %d.\n", rc);
        }
    }
}

/* register regions */

static uint64_t xen_pt_bar_read(void *o, hwaddr addr,
                                unsigned size)
{
    PCIDevice *d = o;
    /* if this function is called, that probably means that there is a
     * misconfiguration of the IOMMU. */
    XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"TARGET_FMT_plx"\n",
               addr);
    return 0;
}
static void xen_pt_bar_write(void *o, hwaddr addr, uint64_t val,
                             unsigned size)
{
    PCIDevice *d = o;
    /* Same comment as xen_pt_bar_read function */
    XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"TARGET_FMT_plx"\n",
               addr);
}

static const MemoryRegionOps ops = {
    .endianness = DEVICE_NATIVE_ENDIAN,
    .read = xen_pt_bar_read,
    .write = xen_pt_bar_write,
};

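/*
 * Walk the host device's BARs and its expansion ROM and register a
 * matching MemoryRegion/BAR on the emulated device for each of them.
 * The regions use the error-reporting ops above because the guest is
 * expected to reach the device through the IOMMU mapping rather than
 * through QEMU.  The PCI_COMMAND bits needed to enable the decoded
 * ranges are accumulated into *cmd for the caller.
 */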
static int xen_pt_register_regions(XenPCIPassthroughState *s, uint16_t *cmd)
{
    int i = 0;
    XenHostPCIDevice *d = &s->real_device;

    /* Register PIO/MMIO BARs */
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        XenHostPCIIORegion *r = &d->io_regions[i];
        uint8_t type;

        if (r->base_addr == 0 || r->size == 0) {
            continue;
        }

        s->bases[i].access.u = r->base_addr;

        if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) {
            type = PCI_BASE_ADDRESS_SPACE_IO;
            *cmd |= PCI_COMMAND_IO;
        } else {
            type = PCI_BASE_ADDRESS_SPACE_MEMORY;
            if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) {
                type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
            }
            if (r->type & XEN_HOST_PCI_REGION_TYPE_MEM_64) {
                type |= PCI_BASE_ADDRESS_MEM_TYPE_64;
            }
            *cmd |= PCI_COMMAND_MEMORY;
        }

        memory_region_init_io(&s->bar[i], OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-bar", r->size);
        pci_register_bar(&s->dev, i, type, &s->bar[i]);

        XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64" type: 0x%x)\n",
                   i, r->size, r->base_addr, type);
    }

    /* Register expansion ROM address */
    if (d->rom.base_addr && d->rom.size) {
        uint32_t bar_data = 0;

        /* Re-set BAR reported by OS, otherwise ROM can't be read. */
        if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) {
            return 0;
        }
        if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) {
            bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK;
            xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data);
        }

        s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr;

        memory_region_init_io(&s->rom, OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-rom", d->rom.size);
        pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH,
                         &s->rom);

        XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64")\n",
                   d->rom.size, d->rom.base_addr);
    }

    xen_pt_register_vga_regions(d);
    return 0;
}

/* region mapping */

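/*
 * Map a MemoryRegion back to the BAR index it was registered for
 * (PCI_ROM_SLOT for the expansion ROM), or -1 if the region does not
 * belong to this device.
 */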
static int xen_pt_bar_from_region(XenPCIPassthroughState *s, MemoryRegion *mr)
{
    int i = 0;

    for (i = 0; i < PCI_NUM_REGIONS - 1; i++) {
        if (mr == &s->bar[i]) {
            return i;
        }
    }
    if (mr == &s->rom) {
        return PCI_ROM_SLOT;
    }
    return -1;
}

/*
 * This function checks whether an io_region overlaps an io_region from
 * another device.  The io_region to check is described by (addr, size,
 * type) in struct CheckBarArgs.  It is run for every device on the bus
 * via pci_for_each_device_under_bus(), and arg->rc is set to true when
 * an overlap is found.
 */
struct CheckBarArgs {
    XenPCIPassthroughState *s;
    pcibus_t addr;
    pcibus_t size;
    uint8_t type;
    bool rc;
};
static void xen_pt_check_bar_overlap(PCIBus *bus, PCIDevice *d, void *opaque)
{
    struct CheckBarArgs *arg = opaque;
    XenPCIPassthroughState *s = arg->s;
    uint8_t type = arg->type;
    int i;

    if (d->devfn == s->dev.devfn) {
        return;
    }

    /* xxx: This ignores bridges. */
    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        const PCIIORegion *r = &d->io_regions[i];

        if (!r->size) {
            continue;
        }
        if ((type & PCI_BASE_ADDRESS_SPACE_IO)
            != (r->type & PCI_BASE_ADDRESS_SPACE_IO)) {
            continue;
        }

        if (ranges_overlap(arg->addr, arg->size, r->addr, r->size)) {
            XEN_PT_WARN(&s->dev,
                        "Overlapped to device [%02x:%02x.%d] Region: %i"
                        " (addr: 0x%"FMT_PCIBUS", len: 0x%"FMT_PCIBUS")\n",
                        pci_bus_num(bus), PCI_SLOT(d->devfn),
                        PCI_FUNC(d->devfn), i, r->addr, r->size);
            arg->rc = true;
        }
    }
}

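/*
 * Handle a memory listener add/del notification: sections that do not
 * belong to this device are ignored.  For the MSI-X table region the new
 * guest base address is recorded and the remapping refreshed; PIO BARs
 * are (un)mapped to the machine ports with xc_domain_ioport_mapping(),
 * MMIO BARs to the machine frames with xc_domain_memory_mapping().  An
 * overlap with another device's region only produces a warning.
 */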
static void xen_pt_region_update(XenPCIPassthroughState *s,
                                 MemoryRegionSection *sec, bool adding)
{
    PCIDevice *d = &s->dev;
    MemoryRegion *mr = sec->mr;
    int bar = -1;
    int rc;
    int op = adding ? DPCI_ADD_MAPPING : DPCI_REMOVE_MAPPING;
    struct CheckBarArgs args = {
        .s = s,
        .addr = sec->offset_within_address_space,
        .size = int128_get64(sec->size),
        .rc = false,
    };

    bar = xen_pt_bar_from_region(s, mr);
    if (bar == -1 && (!s->msix || &s->msix->mmio != mr)) {
        return;
    }

    if (s->msix && &s->msix->mmio == mr) {
        if (adding) {
            s->msix->mmio_base_addr = sec->offset_within_address_space;
            rc = xen_pt_msix_update_remap(s, s->msix->bar_index);
        }
        return;
    }

    args.type = d->io_regions[bar].type;
    pci_for_each_device_under_bus(pci_get_bus(d),
                                  xen_pt_check_bar_overlap, &args);
    if (args.rc) {
        XEN_PT_WARN(d, "Region: %d (addr: 0x%"FMT_PCIBUS
                    ", len: 0x%"FMT_PCIBUS") is overlapped.\n",
                    bar, sec->offset_within_address_space,
                    int128_get64(sec->size));
    }

    if (d->io_regions[bar].type & PCI_BASE_ADDRESS_SPACE_IO) {
        uint32_t guest_port = sec->offset_within_address_space;
        uint32_t machine_port = s->bases[bar].access.pio_base;
        uint32_t size = int128_get64(sec->size);
        rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
                                      guest_port, machine_port, size,
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s ioport mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    } else {
        pcibus_t guest_addr = sec->offset_within_address_space;
        pcibus_t machine_addr = s->bases[bar].access.maddr
            + sec->offset_within_region;
        pcibus_t size = int128_get64(sec->size);
        rc = xc_domain_memory_mapping(xen_xc, xen_domid,
                                      XEN_PFN(guest_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(machine_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(size + XC_PAGE_SIZE - 1),
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s mem mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    }
}

static void xen_pt_region_add(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             memory_listener);

    memory_region_ref(sec->mr);
    xen_pt_region_update(s, sec, true);
}

static void xen_pt_region_del(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             memory_listener);

    xen_pt_region_update(s, sec, false);
    memory_region_unref(sec->mr);
}

static void xen_pt_io_region_add(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             io_listener);

    memory_region_ref(sec->mr);
    xen_pt_region_update(s, sec, true);
}

static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             io_listener);

    xen_pt_region_update(s, sec, false);
    memory_region_unref(sec->mr);
}

static const MemoryListener xen_pt_memory_listener = {
    .name = "xen-pt-mem",
    .region_add = xen_pt_region_add,
    .region_del = xen_pt_region_del,
    .priority = 10,
};

static const MemoryListener xen_pt_io_listener = {
    .name = "xen-pt-io",
    .region_add = xen_pt_io_region_add,
    .region_del = xen_pt_io_region_del,
    .priority = 10,
};

/* destroy. */

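/*
 * Tear the assignment down again: unbind the INTx interrupt (if one was
 * bound and the host device is still open), disable MSI/MSI-X, drop the
 * pirq mapping once its reference count reaches zero, delete the emulated
 * config registers, unregister the VGA regions and memory listeners, and
 * finally release the host device.
 */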
static void xen_pt_destroy(PCIDevice *d)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    XenHostPCIDevice *host_dev = &s->real_device;
    uint8_t machine_irq = s->machine_irq;
    uint8_t intx;
    int rc;

    if (machine_irq && !xen_host_pci_device_closed(&s->real_device)) {
        intx = xen_pt_pci_intx(s);
        rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq,
                                     PT_IRQ_TYPE_PCI,
                                     pci_dev_bus_num(d),
                                     PCI_SLOT(s->dev.devfn),
                                     intx,
                                     0 /* isa_irq */);
        if (rc < 0) {
            XEN_PT_ERR(d, "unbinding of interrupt INT%c failed."
                       " (machine irq: %i, err: %d)"
                       " But bravely continuing on..\n",
                       'a' + intx, machine_irq, errno);
        }
    }

    /* N.B. xen_pt_config_delete takes care of freeing them. */
    if (s->msi) {
        xen_pt_msi_disable(s);
    }
    if (s->msix) {
        xen_pt_msix_disable(s);
    }

    if (machine_irq) {
        xen_pt_mapped_machine_irq[machine_irq]--;

        if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
            rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq);

            if (rc < 0) {
                XEN_PT_ERR(d, "unmapping of interrupt %i failed. (err: %d)"
                           " But bravely continuing on..\n",
                           machine_irq, errno);
            }
        }
        s->machine_irq = 0;
    }

    /* delete all emulated config registers */
    xen_pt_config_delete(s);

    xen_pt_unregister_vga_regions(host_dev);

    if (s->listener_set) {
        memory_listener_unregister(&s->memory_listener);
        memory_listener_unregister(&s->io_listener);
        s->listener_set = false;
    }
    if (!xen_host_pci_device_closed(&s->real_device)) {
        xen_host_pci_device_put(&s->real_device);
    }
}
/* init */

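/*
 * Realize the device: open the host PCI device, set up the virtual config
 * space and BARs (including the IGD VGA special case), then map the
 * machine IRQ to a pirq and bind it to the guest's INTx pin.  Failures
 * while mapping or binding the IRQ only disable INTx on the device; other
 * errors unwind through err_out and xen_pt_destroy().
 */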
static void xen_pt_realize(PCIDevice *d, Error **errp)
{
    ERRP_GUARD();
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int i, rc = 0;
    uint8_t machine_irq = 0, scratch;
    uint16_t cmd = 0;
    int pirq = XEN_PT_UNASSIGNED_PIRQ;

    /* register real device */
    XEN_PT_LOG(d, "Assigning real physical device %02x:%02x.%d"
               " to devfn 0x%x\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
               s->dev.devfn);

    xen_host_pci_device_get(&s->real_device,
                            s->hostaddr.domain, s->hostaddr.bus,
                            s->hostaddr.slot, s->hostaddr.function,
                            errp);
    if (*errp) {
        error_append_hint(errp, "Failed to \"open\" the real pci device");
        return;
    }

    s->is_virtfn = s->real_device.is_virtfn;
    if (s->is_virtfn) {
        XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
                   s->real_device.domain, s->real_device.bus,
                   s->real_device.dev, s->real_device.func);
    }

    /* Initialize virtualized PCI configuration (Extended 256 Bytes) */
    memset(d->config, 0, PCI_CONFIG_SPACE_SIZE);

    s->memory_listener = xen_pt_memory_listener;
    s->io_listener = xen_pt_io_listener;

    /* Setup VGA bios for passthrough GFX */
    if ((s->real_device.domain == 0) && (s->real_device.bus == 0) &&
        (s->real_device.dev == 2) && (s->real_device.func == 0)) {
        if (!is_igd_vga_passthrough(&s->real_device)) {
            error_setg(errp, "Need to enable igd-passthru if you're trying"
                       " to passthrough IGD GFX");
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        xen_pt_setup_vga(s, &s->real_device, errp);
        if (*errp) {
            error_append_hint(errp, "Setup VGA BIOS of passthrough"
                              " GFX failed");
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        /* Register ISA bridge for passthrough GFX. */
        xen_igd_passthrough_isa_bridge_create(s, &s->real_device);
    }

    /* Handle real device's MMIO/PIO BARs */
    xen_pt_register_regions(s, &cmd);

    /* reinitialize each config register to be emulated */
    xen_pt_config_init(s, errp);
    if (*errp) {
        error_append_hint(errp, "PCI Config space initialisation failed");
        rc = -1;
        goto err_out;
    }

    /* Bind interrupt */
    rc = xen_host_pci_get_byte(&s->real_device, PCI_INTERRUPT_PIN, &scratch);
    if (rc) {
        error_setg_errno(errp, errno, "Failed to read PCI_INTERRUPT_PIN");
        goto err_out;
    }
    if (!scratch) {
        XEN_PT_LOG(d, "no pin interrupt\n");
        goto out;
    }

    machine_irq = s->real_device.irq;
    if (machine_irq == 0) {
        XEN_PT_LOG(d, "machine irq is 0\n");
        cmd |= PCI_COMMAND_INTX_DISABLE;
        goto out;
    }

    rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq);
    if (rc < 0) {
        XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (err: %d)\n",
                   machine_irq, pirq, errno);

        /* Disable PCI intx assertion (turn on bit10 of devctl) */
        cmd |= PCI_COMMAND_INTX_DISABLE;
        machine_irq = 0;
        s->machine_irq = 0;
    } else {
        machine_irq = pirq;
        s->machine_irq = pirq;
        xen_pt_mapped_machine_irq[machine_irq]++;
    }

    /* bind machine_irq to device */
    if (machine_irq != 0) {
        uint8_t e_intx = xen_pt_pci_intx(s);

        rc = xc_domain_bind_pt_pci_irq(xen_xc, xen_domid, machine_irq,
                                       pci_dev_bus_num(d),
                                       PCI_SLOT(d->devfn),
                                       e_intx);
        if (rc < 0) {
            XEN_PT_ERR(d, "Binding of interrupt %i failed! (err: %d)\n",
                       e_intx, errno);

            /* Disable PCI intx assertion (turn on bit10 of devctl) */
            cmd |= PCI_COMMAND_INTX_DISABLE;
            xen_pt_mapped_machine_irq[machine_irq]--;

            if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
                if (xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq)) {
                    XEN_PT_ERR(d, "Unmapping of machine interrupt %i failed!"
                               " (err: %d)\n", machine_irq, errno);
                }
            }
            s->machine_irq = 0;
        }
    }

out:
    if (cmd) {
        uint16_t val;

        rc = xen_host_pci_get_word(&s->real_device, PCI_COMMAND, &val);
        if (rc) {
            error_setg_errno(errp, errno, "Failed to read PCI_COMMAND");
            goto err_out;
        } else {
            val |= cmd;
            rc = xen_host_pci_set_word(&s->real_device, PCI_COMMAND, val);
            if (rc) {
                error_setg_errno(errp, errno, "Failed to write PCI_COMMAND"
                                 " val = 0x%x", val);
                goto err_out;
            }
        }
    }

    memory_listener_register(&s->memory_listener, &address_space_memory);
    memory_listener_register(&s->io_listener, &address_space_io);
    s->listener_set = true;
    XEN_PT_LOG(d,
               "Real physical device %02x:%02x.%d registered successfully\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function);

    return;

err_out:
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        object_unparent(OBJECT(&s->bar[i]));
    }
    object_unparent(OBJECT(&s->rom));

    xen_pt_destroy(d);
    assert(rc);
}

static void xen_pt_unregister_device(PCIDevice *d)
{
    xen_pt_destroy(d);
}

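/*
 * Device properties: "hostaddr" selects the host PCI device to assign;
 * "permissive" (off by default) relaxes the config space write filtering
 * in xen_pt_pci_write_config() so that writes to unknown or reserved
 * fields are passed through to the real device.
 */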
static Property xen_pci_passthrough_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr),
    DEFINE_PROP_BOOL("permissive", XenPCIPassthroughState, permissive, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void xen_pci_passthrough_instance_init(Object *obj)
{
    /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices */
    PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}

static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = xen_pt_realize;
    k->exit = xen_pt_unregister_device;
    k->config_read = xen_pt_pci_read_config;
    k->config_write = xen_pt_pci_write_config;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "Assign a host PCI device with Xen";
    device_class_set_props(dc, xen_pci_passthrough_properties);
}

static void xen_pci_passthrough_finalize(Object *obj)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(obj);

    xen_pt_msix_delete(s);
}

static const TypeInfo xen_pci_passthrough_info = {
    .name = TYPE_XEN_PT_DEVICE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(XenPCIPassthroughState),
    .instance_finalize = xen_pci_passthrough_finalize,
    .class_init = xen_pci_passthrough_class_init,
    .instance_init = xen_pci_passthrough_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        { },
    },
};

static void xen_pci_passthrough_register_types(void)
{
    type_register_static(&xen_pci_passthrough_info);
}

type_init(xen_pci_passthrough_register_types)