/*
 * QEMU PCI bus manager
 *
 * Copyright (c) 2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "net/net.h"
#include "sysemu/numa.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "hw/loader.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "trace.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/hotplug.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "pci-internal.h"

#include "hw/xen/xen.h"
#include "hw/i386/kvm/xen_evtchn.h"

//#define DEBUG_PCI
#ifdef DEBUG_PCI
# define PCI_DPRINTF(format, ...)       printf(format, ## __VA_ARGS__)
#else
# define PCI_DPRINTF(format, ...)       do { } while (0)
#endif

bool pci_available = true;

static char *pcibus_get_dev_path(DeviceState *dev);
static char *pcibus_get_fw_dev_path(DeviceState *dev);
static void pcibus_reset_hold(Object *obj, ResetType type);
static bool pcie_has_upstream_port(PCIDevice *dev);

static Property pci_props[] = {
    DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
    DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
    DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX),
    DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
    DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                    QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
    DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
                    QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
                    QEMU_PCIE_EXTCAP_INIT_BITNR, true),
    DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
                       failover_pair_id),
    DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
    DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
                    QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
                    QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
    DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
                       max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
    DEFINE_PROP_END_OF_LIST()
};

static const VMStateDescription vmstate_pcibus = {
    .name = "PCIBUS",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
        VMSTATE_VARRAY_INT32(irq_count, PCIBus,
                             nirq, 0, vmstate_info_int32,
                             int32_t),
        VMSTATE_END_OF_LIST()
    }
};

static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
{
    return a - b;
}

static GSequence *pci_acpi_index_list(void)
{
    static GSequence *used_acpi_index_list;

    if (!used_acpi_index_list) {
        used_acpi_index_list = g_sequence_new(NULL);
    }
    return used_acpi_index_list;
}

static void pci_init_bus_master(PCIDevice *pci_dev)
{
    AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);

    memory_region_init_alias(&pci_dev->bus_master_enable_region,
                             OBJECT(pci_dev), "bus master",
                             dma_as->root, 0, memory_region_size(dma_as->root));
    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
    memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
                                &pci_dev->bus_master_enable_region);
}

static void pcibus_machine_done(Notifier *notifier, void *data)
{
    PCIBus *bus = container_of(notifier, PCIBus, machine_done);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_init_bus_master(bus->devices[i]);
        }
    }
}

static void pci_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);

    bus->machine_done.notify = pcibus_machine_done;
    qemu_add_machine_init_done_notifier(&bus->machine_done);

    vmstate_register_any(NULL, &vmstate_pcibus, bus);
}

static void pcie_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);
    Error *local_err = NULL;

    pci_bus_realize(qbus, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * A PCI-E bus can support extended config space if it's the root
     * bus, or if the bus/bridge above it does as well
     */
    if (pci_bus_is_root(bus)) {
        bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
    } else {
        PCIBus *parent_bus = pci_get_bus(bus->parent_dev);

        if (pci_bus_allows_extended_config_space(parent_bus)) {
            bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
        }
    }
}

static void pci_bus_unrealize(BusState *qbus)
{
    PCIBus *bus = PCI_BUS(qbus);

    qemu_remove_machine_init_done_notifier(&bus->machine_done);

    vmstate_unregister(NULL, &vmstate_pcibus, bus);
}

static int pcibus_num(PCIBus *bus)
{
    if (pci_bus_is_root(bus)) {
        return 0; /* pci host bridge */
    }
    return bus->parent_dev->config[PCI_SECONDARY_BUS];
}

static uint16_t pcibus_numa_node(PCIBus *bus)
{
    return NUMA_NODE_UNASSIGNED;
}

static void pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    PCIBusClass *pbc = PCI_BUS_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    k->print_dev = pcibus_dev_print;
    k->get_dev_path = pcibus_get_dev_path;
    k->get_fw_dev_path = pcibus_get_fw_dev_path;
    k->realize = pci_bus_realize;
    k->unrealize = pci_bus_unrealize;

    rc->phases.hold = pcibus_reset_hold;

    pbc->bus_num = pcibus_num;
    pbc->numa_node = pcibus_numa_node;
}

static const TypeInfo pci_bus_info = {
    .name = TYPE_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(PCIBus),
    .class_size = sizeof(PCIBusClass),
    .class_init = pci_bus_class_init,
};

static const TypeInfo cxl_interface_info = {
    .name   = INTERFACE_CXL_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo pcie_interface_info = {
    .name   = INTERFACE_PCIE_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo conventional_pci_interface_info = {
    .name   = INTERFACE_CONVENTIONAL_PCI_DEVICE,
    .parent = TYPE_INTERFACE,
};

static void pcie_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    k->realize = pcie_bus_realize;
}

static const TypeInfo pcie_bus_info = {
    .name = TYPE_PCIE_BUS,
    .parent = TYPE_PCI_BUS,
    .class_init = pcie_bus_class_init,
};

static const TypeInfo cxl_bus_info = {
    .name = TYPE_CXL_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pcie_bus_class_init,
};

static void pci_update_mappings(PCIDevice *d);
static void pci_irq_handler(void *opaque, int irq_num, int level);
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
static void pci_del_option_rom(PCIDevice *pdev);

static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;

PCIHostStateList pci_host_bridges;

int pci_bar(PCIDevice *d, int reg)
{
    uint8_t type;

    /* PCIe virtual functions do not have their own BARs */
    assert(!pci_is_vf(d));

    if (reg != PCI_ROM_SLOT)
        return PCI_BASE_ADDRESS_0 + reg * 4;

    type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
}
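
/*
 * Illustration of pci_bar() above (offsets quoted from the PCI spec headers):
 * for a type 0 header, regions 0..5 map to config offsets 0x10..0x24 and the
 * expansion ROM to 0x30 (PCI_ROM_ADDRESS); a type 1 (bridge) header instead
 * keeps its ROM BAR at 0x38 (PCI_ROM_ADDRESS1).
 */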

static inline int pci_irq_state(PCIDevice *d, int irq_num)
{
    return (d->irq_state >> irq_num) & 0x1;
}

static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
{
    d->irq_state &= ~(0x1 << irq_num);
    d->irq_state |= level << irq_num;
}

static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    bus->irq_count[irq_num] += change;
    bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
}

static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
{
    PCIBus *bus;
    for (;;) {
        int dev_irq = irq_num;
        bus = pci_get_bus(pci_dev);
        assert(bus->map_irq);
        irq_num = bus->map_irq(pci_dev, irq_num);
        trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
                            pci_bus_is_root(bus) ? "root-complex"
                                    : DEVICE(bus->parent_dev)->canonical_path);
        if (bus->set_irq)
            break;
        pci_dev = bus->parent_dev;
    }
    pci_bus_change_irq_level(bus, irq_num, change);
}

int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    return !!bus->irq_count[irq_num];
}

/* Update interrupt status bit in config space on interrupt
 * state change. */
static void pci_update_irq_status(PCIDevice *dev)
{
    if (dev->irq_state) {
        dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
    } else {
        dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    }
}

void pci_device_deassert_intx(PCIDevice *dev)
{
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_irq_handler(dev, i, 0);
    }
}

static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
{
    MemTxAttrs attrs = {};

    /*
     * Xen uses the high bits of the address to contain some of the bits
     * of the PIRQ#. Therefore we can't just send the write cycle and
     * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
     * So we intercept the delivery here instead of in kvm_send_msi().
     */
    if (xen_mode == XEN_EMULATE &&
        xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
        return;
    }
    attrs.requester_id = pci_requester_id(dev);
    address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
                         attrs, NULL);
}

static void pci_reset_regions(PCIDevice *dev)
{
    int r;
    if (pci_is_vf(dev)) {
        return;
    }

    for (r = 0; r < PCI_NUM_REGIONS; ++r) {
        PCIIORegion *region = &dev->io_regions[r];
        if (!region->size) {
            continue;
        }

        if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
            region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            pci_set_quad(dev->config + pci_bar(dev, r), region->type);
        } else {
            pci_set_long(dev->config + pci_bar(dev, r), region->type);
        }
    }
}

static void pci_do_device_reset(PCIDevice *dev)
{
    pci_device_deassert_intx(dev);
    assert(dev->irq_state == 0);

    /* Clear all writable bits */
    pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
                                 pci_get_word(dev->wmask + PCI_COMMAND) |
                                 pci_get_word(dev->w1cmask + PCI_COMMAND));
    pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
                                 pci_get_word(dev->wmask + PCI_STATUS) |
                                 pci_get_word(dev->w1cmask + PCI_STATUS));
    /* Some devices make bits of PCI_INTERRUPT_LINE read only */
    pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
                                 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
                                 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
    dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
    pci_reset_regions(dev);
    pci_update_mappings(dev);

    msi_reset(dev);
    msix_reset(dev);
    pcie_sriov_pf_reset(dev);
}

/*
 * This function is called on #RST and FLR.
 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
 */
void pci_device_reset(PCIDevice *dev)
{
    device_cold_reset(&dev->qdev);
    pci_do_device_reset(dev);
}

/*
 * Trigger pci bus reset under a given bus.
 * Called via bus_cold_reset on RST# assert, after the devices
 * have already been reset with device_cold_reset.
 */
static void pcibus_reset_hold(Object *obj, ResetType type)
{
    PCIBus *bus = PCI_BUS(obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_do_device_reset(bus->devices[i]);
        }
    }

    for (i = 0; i < bus->nirq; i++) {
        assert(bus->irq_count[i] == 0);
    }
}

static void pci_host_bus_register(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
}

static void pci_host_bus_unregister(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_REMOVE(host_bridge, next);
}

PCIBus *pci_device_root_bus(const PCIDevice *d)
{
    PCIBus *bus = pci_get_bus(d);

    while (!pci_bus_is_root(bus)) {
        d = bus->parent_dev;
        assert(d != NULL);

        bus = pci_get_bus(d);
    }

    return bus;
}

const char *pci_root_bus_path(PCIDevice *dev)
{
    PCIBus *rootbus = pci_device_root_bus(dev);
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);

    assert(host_bridge->bus == rootbus);

    if (hc->root_bus_path) {
        return (*hc->root_bus_path)(host_bridge, rootbus);
    }

    return rootbus->qbus.name;
}

bool pci_bus_bypass_iommu(PCIBus *bus)
{
    PCIBus *rootbus = bus;
    PCIHostState *host_bridge;

    if (!pci_bus_is_root(bus)) {
        rootbus = pci_device_root_bus(bus->parent_dev);
    }

    host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);

    assert(host_bridge->bus == rootbus);

    return host_bridge->bypass_iommu;
}

static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
                                       MemoryRegion *mem, MemoryRegion *io,
                                       uint8_t devfn_min)
{
    assert(PCI_FUNC(devfn_min) == 0);
    bus->devfn_min = devfn_min;
    bus->slot_reserved_mask = 0x0;
    bus->address_space_mem = mem;
    bus->address_space_io = io;
    bus->flags |= PCI_BUS_IS_ROOT;

    /* host bridge */
    QLIST_INIT(&bus->child);

    pci_host_bus_register(parent);
}

static void pci_bus_uninit(PCIBus *bus)
{
    pci_host_bus_unregister(BUS(bus)->parent);
}

bool pci_bus_is_express(const PCIBus *bus)
{
    return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
}

void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
                       const char *name,
                       MemoryRegion *mem, MemoryRegion *io,
                       uint8_t devfn_min, const char *typename)
{
    qbus_init(bus, bus_size, typename, parent, name);
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
}

PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
                         MemoryRegion *mem, MemoryRegion *io,
                         uint8_t devfn_min, const char *typename)
{
    PCIBus *bus;

    bus = PCI_BUS(qbus_new(typename, parent, name));
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
    return bus;
}

void pci_root_bus_cleanup(PCIBus *bus)
{
    pci_bus_uninit(bus);
    /* the caller of the unplug hotplug handler will delete this device */
    qbus_unrealize(BUS(bus));
}

void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
                  void *irq_opaque, int nirq)
{
    bus->set_irq = set_irq;
    bus->irq_opaque = irq_opaque;
    bus->nirq = nirq;
    g_free(bus->irq_count);
    bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
}
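
/*
 * Note, summarising the helpers above: irq_count[] counts how many devices
 * currently drive each bus-level interrupt line, so the board's set_irq
 * callback only ever sees 0 -> !0 and !0 -> 0 transitions.
 */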

void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
{
    bus->map_irq = map_irq;
}

void pci_bus_irqs_cleanup(PCIBus *bus)
{
    bus->set_irq = NULL;
    bus->map_irq = NULL;
    bus->irq_opaque = NULL;
    bus->nirq = 0;
    g_free(bus->irq_count);
    bus->irq_count = NULL;
}

PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
                              pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                              void *irq_opaque,
                              MemoryRegion *mem, MemoryRegion *io,
                              uint8_t devfn_min, int nirq,
                              const char *typename)
{
    PCIBus *bus;

    bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
    pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
    pci_bus_map_irqs(bus, map_irq);
    return bus;
}

void pci_unregister_root_bus(PCIBus *bus)
{
    pci_bus_irqs_cleanup(bus);
    pci_root_bus_cleanup(bus);
}

int pci_bus_num(PCIBus *s)
{
    return PCI_BUS_GET_CLASS(s)->bus_num(s);
}

/* Returns the min and max bus numbers of a PCI bus hierarchy */
void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
{
    int i;
    *min_bus = *max_bus = pci_bus_num(bus);

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        PCIDevice *dev = bus->devices[i];

        if (dev && IS_PCI_BRIDGE(dev)) {
            *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
            *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
        }
    }
}

int pci_bus_numa_node(PCIBus *bus)
{
    return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
}

static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, config);
    uint8_t *config;
    int i;

    assert(size == pci_config_size(s));
    config = g_malloc(size);

    qemu_get_buffer(f, config, size);
    for (i = 0; i < size; ++i) {
        if ((config[i] ^ s->config[i]) &
            s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
            error_report("%s: Bad config data: i=0x%x read: %x device: %x "
                         "cmask: %x wmask: %x w1cmask:%x", __func__,
                         i, config[i], s->config[i],
                         s->cmask[i], s->wmask[i], s->w1cmask[i]);
            g_free(config);
            return -EINVAL;
        }
    }
    memcpy(s->config, config, size);

    pci_update_mappings(s);
    if (IS_PCI_BRIDGE(s)) {
        pci_bridge_update_mappings(PCI_BRIDGE(s));
    }

    memory_region_set_enabled(&s->bus_master_enable_region,
                              pci_get_word(s->config + PCI_COMMAND)
                              & PCI_COMMAND_MASTER);

    g_free(config);
    return 0;
}

/* just put buffer */
static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field, JSONWriter *vmdesc)
{
    const uint8_t **v = pv;
    assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
    qemu_put_buffer(f, *v, size);

    return 0;
}

static const VMStateInfo vmstate_info_pci_config = {
    .name = "pci config",
    .get  = get_pci_config_device,
    .put  = put_pci_config_device,
};

static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);
    uint32_t irq_state[PCI_NUM_PINS];
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        irq_state[i] = qemu_get_be32(f);
        if (irq_state[i] != 0x1 && irq_state[i] != 0) {
            fprintf(stderr, "irq state %d: must be 0 or 1.\n",
                    irq_state[i]);
            return -EINVAL;
        }
    }

    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_set_irq_state(s, i, irq_state[i]);
    }

    return 0;
}

static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    int i;
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);

    for (i = 0; i < PCI_NUM_PINS; ++i) {
        qemu_put_be32(f, pci_irq_state(s, i));
    }

    return 0;
}

static const VMStateInfo vmstate_info_pci_irq_state = {
    .name = "pci irq state",
    .get  = get_pci_irq_state,
    .put  = put_pci_irq_state,
};

static bool migrate_is_pcie(void *opaque, int version_id)
{
    return pci_is_express((PCIDevice *)opaque);
}

static bool migrate_is_not_pcie(void *opaque, int version_id)
{
    return !pci_is_express((PCIDevice *)opaque);
}

const VMStateDescription vmstate_pci_device = {
    .name = "PCIDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_not_pcie,
                                        0, vmstate_info_pci_config,
                                        PCI_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_pcie,
                                        0, vmstate_info_pci_config,
                                        PCIE_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
                                   vmstate_info_pci_irq_state,
                                   PCI_NUM_PINS * sizeof(int32_t)),
        VMSTATE_END_OF_LIST()
    }
};


void pci_device_save(PCIDevice *s, QEMUFile *f)
{
    /* Clear interrupt status bit: it is implicit
     * in irq_state which we are saving.
     * This makes us compatible with old devices
     * which never set or clear this bit. */
    s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    vmstate_save_state(f, &vmstate_pci_device, s, NULL);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
}

int pci_device_load(PCIDevice *s, QEMUFile *f)
{
    int ret;
    ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
    return ret;
}

static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
{
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_default_sub_vendor_id);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                 pci_default_sub_device_id);
}

/*
 * Parse [[<domain>:]<bus>:]<slot> when funcp == NULL,
 * or    [[<domain>:]<bus>:]<slot>.<func> otherwise.
 * Return -1 on error, 0 on success.
 */
static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
                             unsigned int *slotp, unsigned int *funcp)
{
    const char *p;
    char *e;
    unsigned long val;
    unsigned long dom = 0, bus = 0;
    unsigned int slot = 0;
    unsigned int func = 0;

    p = addr;
    val = strtoul(p, &e, 16);
    if (e == p)
        return -1;
    if (*e == ':') {
        bus = val;
        p = e + 1;
        val = strtoul(p, &e, 16);
        if (e == p)
            return -1;
        if (*e == ':') {
            dom = bus;
            bus = val;
            p = e + 1;
            val = strtoul(p, &e, 16);
            if (e == p)
                return -1;
        }
    }

    slot = val;

    if (funcp != NULL) {
        if (*e != '.')
            return -1;

        p = e + 1;
        val = strtoul(p, &e, 16);
        if (e == p)
            return -1;

        func = val;
    }

    /* if funcp == NULL func is 0 */
    if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
        return -1;

    if (*e)
        return -1;

    *domp = dom;
    *busp = bus;
    *slotp = slot;
    if (funcp != NULL)
        *funcp = func;
    return 0;
}

static void pci_init_cmask(PCIDevice *dev)
{
    pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
    pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
    dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
    dev->cmask[PCI_REVISION_ID] = 0xff;
    dev->cmask[PCI_CLASS_PROG] = 0xff;
    pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
    dev->cmask[PCI_HEADER_TYPE] = 0xff;
    dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
}

static void pci_init_wmask(PCIDevice *dev)
{
    int config_size = pci_config_size(dev);

    dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
    dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
    pci_set_word(dev->wmask + PCI_COMMAND,
                 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                 PCI_COMMAND_INTX_DISABLE);
    pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);

    memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
           config_size - PCI_CONFIG_HEADER_SIZE);
}
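
/*
 * A rough reading of the three masks used throughout this file (not a
 * definitive statement of the migration format): wmask marks guest-writable
 * bits, w1cmask marks write-1-to-clear bits, and cmask marks bits that must
 * match on the migration destination (see get_pci_config_device above).
 */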
static void pci_init_w1cmask(PCIDevice *dev)
{
    /*
     * Note: It's okay to set w1cmask even for readonly bits as
     * long as their value is hardwired to 0.
     */
    pci_set_word(dev->w1cmask + PCI_STATUS,
                 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
                 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
                 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
}

static void pci_init_mask_bridge(PCIDevice *d)
{
    /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
       PCI_SEC_LATENCY_TIMER */
    memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);

    /* base and limit */
    d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
    d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
    pci_set_word(d->wmask + PCI_MEMORY_BASE,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
                 PCI_PREF_RANGE_MASK & 0xffff);

    /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
    memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);

    /* Supported memory and i/o types */
    d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
    d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_64);
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_64);

    /*
     * TODO: Bridges default to 10-bit VGA decoding but we currently only
     * implement 16-bit decoding (no alias support).
     */
    pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_PARITY |
                 PCI_BRIDGE_CTL_SERR |
                 PCI_BRIDGE_CTL_ISA |
                 PCI_BRIDGE_CTL_VGA |
                 PCI_BRIDGE_CTL_VGA_16BIT |
                 PCI_BRIDGE_CTL_MASTER_ABORT |
                 PCI_BRIDGE_CTL_BUS_RESET |
                 PCI_BRIDGE_CTL_FAST_BACK |
                 PCI_BRIDGE_CTL_DISCARD |
                 PCI_BRIDGE_CTL_SEC_DISCARD |
                 PCI_BRIDGE_CTL_DISCARD_SERR);
    /* Below does not do anything as we never set this bit, put here for
     * completeness. */
    pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_DISCARD_STATUS);
    d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
    d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_MASK);
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_MASK);
}
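
/*
 * Example (illustrative only): with the masks set up above, a guest write of
 * 0xc0 to PCI_IO_BASE sticks only in the upper nibble, giving the bridge an
 * I/O forwarding window starting at 0xc000, while the read-only low nibble
 * keeps advertising 16-bit I/O decoding.
 */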
static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
{
    uint8_t slot = PCI_SLOT(dev->devfn);
    uint8_t func;

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * With SR/IOV and ARI, a device at function 0 need not be a multifunction
     * device, as it may just be a VF that ended up with function 0 in
     * the legacy PCI interpretation. Avoid failing in such cases:
     */
    if (pci_is_vf(dev) &&
        dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }

    /*
     * The multifunction bit is interpreted in two ways:
     * - all functions must set the bit to 1.
     *   Example: Intel X53
     * - function 0 must set the bit, but the other functions (> 0)
     *   are allowed to leave the bit at 0.
     *   Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10,
     *
     * So the OS (at least Linux) checks the bit only on function 0,
     * and doesn't look at the bit of any function > 0.
     *
     * The check below allows both interpretations.
     */
    if (PCI_FUNC(dev->devfn)) {
        PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
        if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
            /* function 0 should set multifunction bit */
            error_setg(errp, "PCI: single function device can't be populated "
                       "in function %x.%x", slot, PCI_FUNC(dev->devfn));
            return;
        }
        return;
    }

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }
    /* function 0 indicates single function, so function > 0 must be NULL */
    for (func = 1; func < PCI_FUNC_MAX; ++func) {
        if (bus->devices[PCI_DEVFN(slot, func)]) {
            error_setg(errp, "PCI: %x.0 indicates single function, "
                       "but %x.%x is already populated.",
                       slot, slot, func);
            return;
        }
    }
}

static void pci_config_alloc(PCIDevice *pci_dev)
{
    int config_size = pci_config_size(pci_dev);

    pci_dev->config = g_malloc0(config_size);
    pci_dev->cmask = g_malloc0(config_size);
    pci_dev->wmask = g_malloc0(config_size);
    pci_dev->w1cmask = g_malloc0(config_size);
    pci_dev->used = g_malloc0(config_size);
}

static void pci_config_free(PCIDevice *pci_dev)
{
    g_free(pci_dev->config);
    g_free(pci_dev->cmask);
    g_free(pci_dev->wmask);
    g_free(pci_dev->w1cmask);
    g_free(pci_dev->used);
}

static void do_pci_unregister_device(PCIDevice *pci_dev)
{
    pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
    pci_config_free(pci_dev);

    if (xen_mode == XEN_EMULATE) {
        xen_evtchn_remove_pci_device(pci_dev);
    }
    if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
        memory_region_del_subregion(&pci_dev->bus_master_container_region,
                                    &pci_dev->bus_master_enable_region);
    }
    address_space_destroy(&pci_dev->bus_master_as);
}

/* Extract PCIReqIDCache into BDF format */
static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
{
    uint8_t bus_n;
    uint16_t result;

    switch (cache->type) {
    case PCI_REQ_ID_BDF:
        result = pci_get_bdf(cache->dev);
        break;
    case PCI_REQ_ID_SECONDARY_BUS:
        bus_n = pci_dev_bus_num(cache->dev);
        result = PCI_BUILD_BDF(bus_n, 0);
        break;
    default:
        error_report("Invalid PCI requester ID cache type: %d",
                     cache->type);
        exit(1);
        break;
    }

    return result;
}

/* Parse bridges up to the root complex and return requester ID
 * cache for the specific device.  For a pure PCIe topology, the cached
 * result is exactly the same as the BDF of the device.  However,
 * several tricks are needed when the system mixes legacy PCI devices
 * with PCIe-to-PCI bridges.
 *
 * Here we cache the proxy device (and type) rather than the requester
 * ID itself, since bus numbers may change from time to time.
 */
static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
{
    PCIDevice *parent;
    PCIReqIDCache cache = {
        .dev = dev,
        .type = PCI_REQ_ID_BDF,
    };

    while (!pci_bus_is_root(pci_get_bus(dev))) {
        /* We are under PCI/PCIe bridges */
        parent = pci_get_bus(dev)->parent_dev;
        if (pci_is_express(parent)) {
            if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                /* When we pass through PCIe-to-PCI/PCIX bridges, we
                 * override the requester ID using secondary bus
                 * number of parent bridge with zeroed devfn
                 * (pcie-to-pci bridge spec chap 2.3). */
                cache.type = PCI_REQ_ID_SECONDARY_BUS;
                cache.dev = dev;
            }
        } else {
            /* Legacy PCI, override requester ID with the bridge's
             * BDF upstream.  When the root complex connects to
             * legacy PCI devices (including buses), it can only
             * obtain requester ID info from directly attached
             * devices.  If devices are attached under bridges, only
             * the requester ID of the bridge that is directly
             * attached to the root complex can be recognized. */
            cache.type = PCI_REQ_ID_BDF;
            cache.dev = parent;
        }
        dev = parent;
    }

    return cache;
}

uint16_t pci_requester_id(PCIDevice *dev)
{
    return pci_req_id_cache_extract(&dev->requester_id_cache);
}
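
/*
 * Example (illustrative): a conventional PCI device sitting behind a
 * PCIe-to-PCI bridge whose secondary bus number is 5 is cached as
 * PCI_REQ_ID_SECONDARY_BUS, so its requester ID is reported as 05:00.0
 * regardless of the device's own devfn.
 */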

static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
{
    return !(bus->devices[devfn]);
}

static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
{
    return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
}

uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
{
    return bus->slot_reserved_mask;
}

void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask |= mask;
}

void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask &= ~mask;
}

/* -1 for devfn means auto assign */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
                                         const char *name, int devfn,
                                         Error **errp)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIConfigReadFunc *config_read = pc->config_read;
    PCIConfigWriteFunc *config_write = pc->config_write;
    Error *local_err = NULL;
    DeviceState *dev = DEVICE(pci_dev);
    PCIBus *bus = pci_get_bus(pci_dev);
    bool is_bridge = IS_PCI_BRIDGE(pci_dev);

    /* Only pci bridges can be attached to extra PCI root buses */
    if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
        error_setg(errp,
                   "PCI: Only PCI/PCIe bridges can be plugged into %s",
                   bus->parent_dev->name);
        return NULL;
    }

    if (devfn < 0) {
        for (devfn = bus->devfn_min; devfn < ARRAY_SIZE(bus->devices);
             devfn += PCI_FUNC_MAX) {
            if (pci_bus_devfn_available(bus, devfn) &&
                !pci_bus_devfn_reserved(bus, devfn)) {
                goto found;
            }
        }
        error_setg(errp, "PCI: no slot/function available for %s, all in use "
                   "or reserved", name);
        return NULL;
    found: ;
    } else if (pci_bus_devfn_reserved(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " reserved",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name);
        return NULL;
    } else if (!pci_bus_devfn_available(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " in use by %s,id=%s",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name,
                   bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
        return NULL;
    }

    /*
     * Populating function 0 triggers a scan from the guest that
     * exposes other non-zero functions. Hence we need to ensure that
     * function 0 wasn't added yet.
     */
    if (dev->hotplugged && !pci_is_vf(pci_dev) &&
        pci_get_function_0(pci_dev)) {
        error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
                   " new func %s cannot be exposed to guest.",
                   PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
                   pci_get_function_0(pci_dev)->name,
                   name);

        return NULL;
    }

    pci_dev->devfn = devfn;
    pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
    pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);

    memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
                       "bus master container", UINT64_MAX);
    address_space_init(&pci_dev->bus_master_as,
                       &pci_dev->bus_master_container_region, pci_dev->name);
    pci_dev->bus_master_as.max_bounce_buffer_size =
        pci_dev->max_bounce_buffer_size;

    if (phase_check(PHASE_MACHINE_READY)) {
        pci_init_bus_master(pci_dev);
    }
    pci_dev->irq_state = 0;
    pci_config_alloc(pci_dev);

    pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
    pci_config_set_device_id(pci_dev->config, pc->device_id);
    pci_config_set_revision(pci_dev->config, pc->revision);
    pci_config_set_class(pci_dev->config, pc->class_id);

    if (!is_bridge) {
        if (pc->subsystem_vendor_id || pc->subsystem_id) {
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                         pc->subsystem_vendor_id);
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                         pc->subsystem_id);
        } else {
            pci_set_default_subsystem_id(pci_dev);
        }
    } else {
        /* subsystem_vendor_id/subsystem_id are only for header type 0 */
        assert(!pc->subsystem_vendor_id);
        assert(!pc->subsystem_id);
    }
    pci_init_cmask(pci_dev);
    pci_init_wmask(pci_dev);
    pci_init_w1cmask(pci_dev);
    if (is_bridge) {
        pci_init_mask_bridge(pci_dev);
    }
    pci_init_multifunction(bus, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        do_pci_unregister_device(pci_dev);
        return NULL;
    }

    if (!config_read)
        config_read = pci_default_read_config;
    if (!config_write)
        config_write = pci_default_write_config;
    pci_dev->config_read = config_read;
    pci_dev->config_write = config_write;
    bus->devices[devfn] = pci_dev;
    pci_dev->version_id = 2; /* Current pci device vmstate version */
    return pci_dev;
}

static void pci_unregister_io_regions(PCIDevice *pci_dev)
{
    PCIIORegion *r;
    int i;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &pci_dev->io_regions[i];
        if (!r->size || r->addr == PCI_BAR_UNMAPPED)
            continue;
        memory_region_del_subregion(r->address_space, r->memory);
    }

    pci_unregister_vga(pci_dev);
}

static void pci_qdev_unrealize(DeviceState *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);

    pci_unregister_io_regions(pci_dev);
    pci_del_option_rom(pci_dev);

    if (pc->exit) {
        pc->exit(pci_dev);
    }

    pci_device_deassert_intx(pci_dev);
    do_pci_unregister_device(pci_dev);

    pci_dev->msi_trigger = NULL;

    /*
     * Clean up the acpi-index so it can be reused by another device.
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        g_sequence_remove(g_sequence_lookup(used_indexes,
                          GINT_TO_POINTER(pci_dev->acpi_index),
                          g_cmp_uint32, NULL));
    }
}
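
/*
 * Typical use of pci_register_bar() (a minimal sketch, not taken from this
 * file): a device's realize method creates a MemoryRegion and exposes it as
 * BAR 0, e.g.
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &ops, s, "mydev-mmio", 0x1000);
 *     pci_register_bar(PCI_DEVICE(s), 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio);
 *
 * "s", "ops" and the region name are illustrative; the BAR size must be a
 * power of two, as asserted below.
 */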
void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory)
{
    PCIIORegion *r;
    uint32_t addr; /* offset in pci config space */
    uint64_t wmask;
    pcibus_t size = memory_region_size(memory);
    uint8_t hdr_type;

    assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    assert(is_power_of_2(size));

    /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
    hdr_type =
        pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);

    r = &pci_dev->io_regions[region_num];
    r->addr = PCI_BAR_UNMAPPED;
    r->size = size;
    r->type = type;
    r->memory = memory;
    r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
                        ? pci_get_bus(pci_dev)->address_space_io
                        : pci_get_bus(pci_dev)->address_space_mem;

    wmask = ~(size - 1);
    if (region_num == PCI_ROM_SLOT) {
        /* ROM enable bit is writable */
        wmask |= PCI_ROM_ADDRESS_ENABLE;
    }

    addr = pci_bar(pci_dev, region_num);
    pci_set_long(pci_dev->config + addr, type);

    if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
        r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        pci_set_quad(pci_dev->wmask + addr, wmask);
        pci_set_quad(pci_dev->cmask + addr, ~0ULL);
    } else {
        pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
        pci_set_long(pci_dev->cmask + addr, 0xffffffff);
    }
}

static void pci_update_vga(PCIDevice *pci_dev)
{
    uint16_t cmd;

    if (!pci_dev->has_vga) {
        return;
    }

    cmd = pci_get_word(pci_dev->config + PCI_COMMAND);

    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
                              cmd & PCI_COMMAND_MEMORY);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
                              cmd & PCI_COMMAND_IO);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
                              cmd & PCI_COMMAND_IO);
}

void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
                      MemoryRegion *io_lo, MemoryRegion *io_hi)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    assert(!pci_dev->has_vga);

    assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
    memory_region_add_subregion_overlap(bus->address_space_mem,
                                        QEMU_PCI_VGA_MEM_BASE, mem, 1);

    assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);

    assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
    pci_dev->has_vga = true;

    pci_update_vga(pci_dev);
}

void pci_unregister_vga(PCIDevice *pci_dev)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    if (!pci_dev->has_vga) {
        return;
    }

    memory_region_del_subregion(bus->address_space_mem,
                                pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
    pci_dev->has_vga = false;
}

pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
{
    return pci_dev->io_regions[region_num].addr;
}

static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
                                        uint8_t type, pcibus_t size)
{
    pcibus_t new_addr;
    if (!pci_is_vf(d)) {
        int bar = pci_bar(d, reg);
        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(d->config + bar);
        } else {
            new_addr = pci_get_long(d->config + bar);
        }
    } else {
        PCIDevice *pf = d->exp.sriov_vf.pf;
        uint16_t sriov_cap = pf->exp.sriov_cap;
        int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
        uint16_t vf_offset =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
        uint16_t vf_stride =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
        uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;

        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(pf->config + bar);
        } else {
            new_addr = pci_get_long(pf->config + bar);
        }
        new_addr += vf_num * size;
    }
    /* The ROM slot has a specific enable bit, keep it intact */
    if (reg != PCI_ROM_SLOT) {
        new_addr &= ~(size - 1);
    }
    return new_addr;
}
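
/*
 * Worked example for the VF branch above (illustrative numbers only): with
 * vf_offset = 1, vf_stride = 1 and a 16 KiB VF BAR, the third VF behind a PF
 * at devfn 0 has vf_num = 2 and therefore decodes at the SR-IOV BAR base
 * plus 2 * 16 KiB.
 */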

pcibus_t pci_bar_address(PCIDevice *d,
                         int reg, uint8_t type, pcibus_t size)
{
    pcibus_t new_addr, last_addr;
    uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    bool allow_0_address = mc->pci_allow_0_address;

    if (type & PCI_BASE_ADDRESS_SPACE_IO) {
        if (!(cmd & PCI_COMMAND_IO)) {
            return PCI_BAR_UNMAPPED;
        }
        new_addr = pci_config_get_bar_addr(d, reg, type, size);
        last_addr = new_addr + size - 1;
        /* Check if 32 bit BAR wraps around explicitly.
         * TODO: make priorities correct and remove this work around.
         */
        if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
            (!allow_0_address && new_addr == 0)) {
            return PCI_BAR_UNMAPPED;
        }
        return new_addr;
    }

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr = pci_config_get_bar_addr(d, reg, type, size);
    /* the ROM slot has a specific enable bit */
    if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr &= ~(size - 1);
    last_addr = new_addr + size - 1;
    /* NOTE: we do not support wrapping */
    /* XXX: as we cannot support really dynamic
       mappings, we handle specific values as invalid
       mappings. */
    if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
        (!allow_0_address && new_addr == 0)) {
        return PCI_BAR_UNMAPPED;
    }

    /* Now pcibus_t is 64bit.
     * Check if 32 bit BAR wraps around explicitly.
     * Without this, PC ide doesn't work well.
     * TODO: remove this work around.
     */
    if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    /*
     * OS is allowed to set BAR beyond its addressable
     * bits. For example, 32 bit OS can set 64bit bar
     * to >4G. Check it. TODO: we might need to support
     * it in the future for e.g. PAE.
     */
    if (last_addr >= HWADDR_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    return new_addr;
}

static void pci_update_mappings(PCIDevice *d)
{
    PCIIORegion *r;
    int i;
    pcibus_t new_addr;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &d->io_regions[i];

        /* this region isn't registered */
        if (!r->size)
            continue;

        new_addr = pci_bar_address(d, i, r->type, r->size);
        if (!d->has_power) {
            new_addr = PCI_BAR_UNMAPPED;
        }

        /* This bar isn't changed */
        if (new_addr == r->addr)
            continue;

        /* now do the real mapping */
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_del_subregion(r->address_space, r->memory);
        }
        r->addr = new_addr;
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    }

    pci_update_vga(d);
}

static inline int pci_irq_disabled(PCIDevice *d)
{
    return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
}

/* Called after interrupt disabled field update in config space,
 * assert/deassert interrupts if necessary.
 * Gets original interrupt disable bit value (before update). */
static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
{
    int i, disabled = pci_irq_disabled(d);
    if (disabled == was_irq_disabled)
        return;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        int state = pci_irq_state(d, i);
        pci_change_irq_level(d, i, disabled ? -state : state);
    }
}
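
/*
 * For example (illustrative): if INTA is currently asserted and the guest
 * sets PCI_COMMAND_INTX_DISABLE, the function above retracts the level
 * (change of -1); clearing the bit later re-applies it (+1).
 */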
-state : state); 1579 } 1580 } 1581 1582 uint32_t pci_default_read_config(PCIDevice *d, 1583 uint32_t address, int len) 1584 { 1585 uint32_t val = 0; 1586 1587 assert(address + len <= pci_config_size(d)); 1588 1589 if (pci_is_express_downstream_port(d) && 1590 ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) { 1591 pcie_sync_bridge_lnk(d); 1592 } 1593 memcpy(&val, d->config + address, len); 1594 return le32_to_cpu(val); 1595 } 1596 1597 void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l) 1598 { 1599 int i, was_irq_disabled = pci_irq_disabled(d); 1600 uint32_t val = val_in; 1601 1602 assert(addr + l <= pci_config_size(d)); 1603 1604 for (i = 0; i < l; val >>= 8, ++i) { 1605 uint8_t wmask = d->wmask[addr + i]; 1606 uint8_t w1cmask = d->w1cmask[addr + i]; 1607 assert(!(wmask & w1cmask)); 1608 d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask); 1609 d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ 1610 } 1611 if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) || 1612 ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) || 1613 ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) || 1614 range_covers_byte(addr, l, PCI_COMMAND)) 1615 pci_update_mappings(d); 1616 1617 if (ranges_overlap(addr, l, PCI_COMMAND, 2)) { 1618 pci_update_irq_disabled(d, was_irq_disabled); 1619 memory_region_set_enabled(&d->bus_master_enable_region, 1620 (pci_get_word(d->config + PCI_COMMAND) 1621 & PCI_COMMAND_MASTER) && d->has_power); 1622 } 1623 1624 msi_write_config(d, addr, val_in, l); 1625 msix_write_config(d, addr, val_in, l); 1626 pcie_sriov_config_write(d, addr, val_in, l); 1627 } 1628 1629 /***********************************************************/ 1630 /* generic PCI irq support */ 1631 1632 /* 0 <= irq_num <= 3. level must be 0 or 1 */ 1633 static void pci_irq_handler(void *opaque, int irq_num, int level) 1634 { 1635 PCIDevice *pci_dev = opaque; 1636 int change; 1637 1638 assert(0 <= irq_num && irq_num < PCI_NUM_PINS); 1639 assert(level == 0 || level == 1); 1640 change = level - pci_irq_state(pci_dev, irq_num); 1641 if (!change) 1642 return; 1643 1644 pci_set_irq_state(pci_dev, irq_num, level); 1645 pci_update_irq_status(pci_dev); 1646 if (pci_irq_disabled(pci_dev)) 1647 return; 1648 pci_change_irq_level(pci_dev, irq_num, change); 1649 } 1650 1651 qemu_irq pci_allocate_irq(PCIDevice *pci_dev) 1652 { 1653 int intx = pci_intx(pci_dev); 1654 assert(0 <= intx && intx < PCI_NUM_PINS); 1655 1656 return qemu_allocate_irq(pci_irq_handler, pci_dev, intx); 1657 } 1658 1659 void pci_set_irq(PCIDevice *pci_dev, int level) 1660 { 1661 int intx = pci_intx(pci_dev); 1662 pci_irq_handler(pci_dev, intx, level); 1663 } 1664 1665 /* Special hooks used by device assignment */ 1666 void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq) 1667 { 1668 assert(pci_bus_is_root(bus)); 1669 bus->route_intx_to_irq = route_intx_to_irq; 1670 } 1671 1672 PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin) 1673 { 1674 PCIBus *bus; 1675 1676 do { 1677 int dev_irq = pin; 1678 bus = pci_get_bus(dev); 1679 pin = bus->map_irq(dev, pin); 1680 trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin, 1681 pci_bus_is_root(bus) ? 
"root-complex" 1682 : DEVICE(bus->parent_dev)->canonical_path); 1683 dev = bus->parent_dev; 1684 } while (dev); 1685 1686 if (!bus->route_intx_to_irq) { 1687 error_report("PCI: Bug - unimplemented PCI INTx routing (%s)", 1688 object_get_typename(OBJECT(bus->qbus.parent))); 1689 return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 }; 1690 } 1691 1692 return bus->route_intx_to_irq(bus->irq_opaque, pin); 1693 } 1694 1695 bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new) 1696 { 1697 return old->mode != new->mode || old->irq != new->irq; 1698 } 1699 1700 void pci_bus_fire_intx_routing_notifier(PCIBus *bus) 1701 { 1702 PCIDevice *dev; 1703 PCIBus *sec; 1704 int i; 1705 1706 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 1707 dev = bus->devices[i]; 1708 if (dev && dev->intx_routing_notifier) { 1709 dev->intx_routing_notifier(dev); 1710 } 1711 } 1712 1713 QLIST_FOREACH(sec, &bus->child, sibling) { 1714 pci_bus_fire_intx_routing_notifier(sec); 1715 } 1716 } 1717 1718 void pci_device_set_intx_routing_notifier(PCIDevice *dev, 1719 PCIINTxRoutingNotifier notifier) 1720 { 1721 dev->intx_routing_notifier = notifier; 1722 } 1723 1724 /* 1725 * PCI-to-PCI bridge specification 1726 * 9.1: Interrupt routing. Table 9-1 1727 * 1728 * the PCI Express Base Specification, Revision 2.1 1729 * 2.2.8.1: INTx interrupt signaling - Rules 1730 * the Implementation Note 1731 * Table 2-20 1732 */ 1733 /* 1734 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD 1735 * 0-origin unlike PCI interrupt pin register. 1736 */ 1737 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) 1738 { 1739 return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin); 1740 } 1741 1742 /***********************************************************/ 1743 /* monitor info on PCI */ 1744 1745 static const pci_class_desc pci_class_descriptions[] = 1746 { 1747 { 0x0001, "VGA controller", "display"}, 1748 { 0x0100, "SCSI controller", "scsi"}, 1749 { 0x0101, "IDE controller", "ide"}, 1750 { 0x0102, "Floppy controller", "fdc"}, 1751 { 0x0103, "IPI controller", "ipi"}, 1752 { 0x0104, "RAID controller", "raid"}, 1753 { 0x0106, "SATA controller"}, 1754 { 0x0107, "SAS controller"}, 1755 { 0x0180, "Storage controller"}, 1756 { 0x0200, "Ethernet controller", "ethernet"}, 1757 { 0x0201, "Token Ring controller", "token-ring"}, 1758 { 0x0202, "FDDI controller", "fddi"}, 1759 { 0x0203, "ATM controller", "atm"}, 1760 { 0x0280, "Network controller"}, 1761 { 0x0300, "VGA controller", "display", 0x00ff}, 1762 { 0x0301, "XGA controller"}, 1763 { 0x0302, "3D controller"}, 1764 { 0x0380, "Display controller"}, 1765 { 0x0400, "Video controller", "video"}, 1766 { 0x0401, "Audio controller", "sound"}, 1767 { 0x0402, "Phone"}, 1768 { 0x0403, "Audio controller", "sound"}, 1769 { 0x0480, "Multimedia controller"}, 1770 { 0x0500, "RAM controller", "memory"}, 1771 { 0x0501, "Flash controller", "flash"}, 1772 { 0x0580, "Memory controller"}, 1773 { 0x0600, "Host bridge", "host"}, 1774 { 0x0601, "ISA bridge", "isa"}, 1775 { 0x0602, "EISA bridge", "eisa"}, 1776 { 0x0603, "MC bridge", "mca"}, 1777 { 0x0604, "PCI bridge", "pci-bridge"}, 1778 { 0x0605, "PCMCIA bridge", "pcmcia"}, 1779 { 0x0606, "NUBUS bridge", "nubus"}, 1780 { 0x0607, "CARDBUS bridge", "cardbus"}, 1781 { 0x0608, "RACEWAY bridge"}, 1782 { 0x0680, "Bridge"}, 1783 { 0x0700, "Serial port", "serial"}, 1784 { 0x0701, "Parallel port", "parallel"}, 1785 { 0x0800, "Interrupt controller", "interrupt-controller"}, 1786 { 0x0801, "DMA controller", "dma-controller"}, 1787 { 0x0802, "Timer", "timer"}, 1788 { 0x0803, "RTC", 
"rtc"}, 1789 { 0x0900, "Keyboard", "keyboard"}, 1790 { 0x0901, "Pen", "pen"}, 1791 { 0x0902, "Mouse", "mouse"}, 1792 { 0x0A00, "Dock station", "dock", 0x00ff}, 1793 { 0x0B00, "i386 cpu", "cpu", 0x00ff}, 1794 { 0x0c00, "Firewire controller", "firewire"}, 1795 { 0x0c01, "Access bus controller", "access-bus"}, 1796 { 0x0c02, "SSA controller", "ssa"}, 1797 { 0x0c03, "USB controller", "usb"}, 1798 { 0x0c04, "Fibre channel controller", "fibre-channel"}, 1799 { 0x0c05, "SMBus"}, 1800 { 0, NULL} 1801 }; 1802 1803 void pci_for_each_device_under_bus_reverse(PCIBus *bus, 1804 pci_bus_dev_fn fn, 1805 void *opaque) 1806 { 1807 PCIDevice *d; 1808 int devfn; 1809 1810 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 1811 d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn]; 1812 if (d) { 1813 fn(bus, d, opaque); 1814 } 1815 } 1816 } 1817 1818 void pci_for_each_device_reverse(PCIBus *bus, int bus_num, 1819 pci_bus_dev_fn fn, void *opaque) 1820 { 1821 bus = pci_find_bus_nr(bus, bus_num); 1822 1823 if (bus) { 1824 pci_for_each_device_under_bus_reverse(bus, fn, opaque); 1825 } 1826 } 1827 1828 void pci_for_each_device_under_bus(PCIBus *bus, 1829 pci_bus_dev_fn fn, void *opaque) 1830 { 1831 PCIDevice *d; 1832 int devfn; 1833 1834 for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 1835 d = bus->devices[devfn]; 1836 if (d) { 1837 fn(bus, d, opaque); 1838 } 1839 } 1840 } 1841 1842 void pci_for_each_device(PCIBus *bus, int bus_num, 1843 pci_bus_dev_fn fn, void *opaque) 1844 { 1845 bus = pci_find_bus_nr(bus, bus_num); 1846 1847 if (bus) { 1848 pci_for_each_device_under_bus(bus, fn, opaque); 1849 } 1850 } 1851 1852 const pci_class_desc *get_class_desc(int class) 1853 { 1854 const pci_class_desc *desc; 1855 1856 desc = pci_class_descriptions; 1857 while (desc->desc && class != desc->class) { 1858 desc++; 1859 } 1860 1861 return desc; 1862 } 1863 1864 void pci_init_nic_devices(PCIBus *bus, const char *default_model) 1865 { 1866 qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model, 1867 "virtio", "virtio-net-pci"); 1868 } 1869 1870 bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model, 1871 const char *alias, const char *devaddr) 1872 { 1873 NICInfo *nd = qemu_find_nic_info(model, true, alias); 1874 int dom, busnr, devfn; 1875 PCIDevice *pci_dev; 1876 unsigned slot; 1877 PCIBus *bus; 1878 1879 if (!nd) { 1880 return false; 1881 } 1882 1883 if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) { 1884 error_report("Invalid PCI device address %s for device %s", 1885 devaddr, model); 1886 exit(1); 1887 } 1888 1889 if (dom != 0) { 1890 error_report("No support for non-zero PCI domains"); 1891 exit(1); 1892 } 1893 1894 devfn = PCI_DEVFN(slot, 0); 1895 1896 bus = pci_find_bus_nr(rootbus, busnr); 1897 if (!bus) { 1898 error_report("Invalid PCI device address %s for device %s", 1899 devaddr, model); 1900 exit(1); 1901 } 1902 1903 pci_dev = pci_new(devfn, model); 1904 qdev_set_nic_properties(&pci_dev->qdev, nd); 1905 pci_realize_and_unref(pci_dev, bus, &error_fatal); 1906 return true; 1907 } 1908 1909 PCIDevice *pci_vga_init(PCIBus *bus) 1910 { 1911 vga_interface_created = true; 1912 switch (vga_interface_type) { 1913 case VGA_CIRRUS: 1914 return pci_create_simple(bus, -1, "cirrus-vga"); 1915 case VGA_QXL: 1916 return pci_create_simple(bus, -1, "qxl-vga"); 1917 case VGA_STD: 1918 return pci_create_simple(bus, -1, "VGA"); 1919 case VGA_VMWARE: 1920 return pci_create_simple(bus, -1, "vmware-svga"); 1921 case VGA_VIRTIO: 1922 return pci_create_simple(bus, -1, 
"virtio-vga"); 1923 case VGA_NONE: 1924 default: /* Other non-PCI types. Checking for unsupported types is already 1925 done in vl.c. */ 1926 return NULL; 1927 } 1928 } 1929 1930 /* Whether a given bus number is in range of the secondary 1931 * bus of the given bridge device. */ 1932 static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num) 1933 { 1934 return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) & 1935 PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ && 1936 dev->config[PCI_SECONDARY_BUS] <= bus_num && 1937 bus_num <= dev->config[PCI_SUBORDINATE_BUS]; 1938 } 1939 1940 /* Whether a given bus number is in a range of a root bus */ 1941 static bool pci_root_bus_in_range(PCIBus *bus, int bus_num) 1942 { 1943 int i; 1944 1945 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 1946 PCIDevice *dev = bus->devices[i]; 1947 1948 if (dev && IS_PCI_BRIDGE(dev)) { 1949 if (pci_secondary_bus_in_range(dev, bus_num)) { 1950 return true; 1951 } 1952 } 1953 } 1954 1955 return false; 1956 } 1957 1958 PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num) 1959 { 1960 PCIBus *sec; 1961 1962 if (!bus) { 1963 return NULL; 1964 } 1965 1966 if (pci_bus_num(bus) == bus_num) { 1967 return bus; 1968 } 1969 1970 /* Consider all bus numbers in range for the host pci bridge. */ 1971 if (!pci_bus_is_root(bus) && 1972 !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) { 1973 return NULL; 1974 } 1975 1976 /* try child bus */ 1977 for (; bus; bus = sec) { 1978 QLIST_FOREACH(sec, &bus->child, sibling) { 1979 if (pci_bus_num(sec) == bus_num) { 1980 return sec; 1981 } 1982 /* PXB buses assumed to be children of bus 0 */ 1983 if (pci_bus_is_root(sec)) { 1984 if (pci_root_bus_in_range(sec, bus_num)) { 1985 break; 1986 } 1987 } else { 1988 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) { 1989 break; 1990 } 1991 } 1992 } 1993 } 1994 1995 return NULL; 1996 } 1997 1998 void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, 1999 pci_bus_fn end, void *parent_state) 2000 { 2001 PCIBus *sec; 2002 void *state; 2003 2004 if (!bus) { 2005 return; 2006 } 2007 2008 if (begin) { 2009 state = begin(bus, parent_state); 2010 } else { 2011 state = parent_state; 2012 } 2013 2014 QLIST_FOREACH(sec, &bus->child, sibling) { 2015 pci_for_each_bus_depth_first(sec, begin, end, state); 2016 } 2017 2018 if (end) { 2019 end(bus, state); 2020 } 2021 } 2022 2023 2024 PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn) 2025 { 2026 bus = pci_find_bus_nr(bus, bus_num); 2027 2028 if (!bus) 2029 return NULL; 2030 2031 return bus->devices[devfn]; 2032 } 2033 2034 #define ONBOARD_INDEX_MAX (16 * 1024 - 1) 2035 2036 static void pci_qdev_realize(DeviceState *qdev, Error **errp) 2037 { 2038 PCIDevice *pci_dev = (PCIDevice *)qdev; 2039 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 2040 ObjectClass *klass = OBJECT_CLASS(pc); 2041 Error *local_err = NULL; 2042 bool is_default_rom; 2043 uint16_t class_id; 2044 2045 /* 2046 * capped by systemd (see: udev-builtin-net_id.c) 2047 * as it's the only known user honor it to avoid users 2048 * misconfigure QEMU and then wonder why acpi-index doesn't work 2049 */ 2050 if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) { 2051 error_setg(errp, "acpi-index should be less or equal to %u", 2052 ONBOARD_INDEX_MAX); 2053 return; 2054 } 2055 2056 /* 2057 * make sure that acpi-index is unique across all present PCI devices 2058 */ 2059 if (pci_dev->acpi_index) { 2060 GSequence *used_indexes = pci_acpi_index_list(); 2061 2062 if (g_sequence_lookup(used_indexes, 2063 
GINT_TO_POINTER(pci_dev->acpi_index), 2064 g_cmp_uint32, NULL)) { 2065 error_setg(errp, "a PCI device with acpi-index = %" PRIu32 2066 " already exists", pci_dev->acpi_index); 2067 return; 2068 } 2069 g_sequence_insert_sorted(used_indexes, 2070 GINT_TO_POINTER(pci_dev->acpi_index), 2071 g_cmp_uint32, NULL); 2072 } 2073 2074 if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) { 2075 error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize); 2076 return; 2077 } 2078 2079 /* Initialize cap_present for pci_is_express() and pci_config_size(). 2080 * Note that hybrid PCIs are not set automatically and need to manage 2081 * QEMU_PCI_CAP_EXPRESS manually. */ 2082 if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) && 2083 !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) { 2084 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; 2085 } 2086 2087 if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) { 2088 pci_dev->cap_present |= QEMU_PCIE_CAP_CXL; 2089 } 2090 2091 pci_dev = do_pci_register_device(pci_dev, 2092 object_get_typename(OBJECT(qdev)), 2093 pci_dev->devfn, errp); 2094 if (pci_dev == NULL) 2095 return; 2096 2097 if (pc->realize) { 2098 pc->realize(pci_dev, &local_err); 2099 if (local_err) { 2100 error_propagate(errp, local_err); 2101 do_pci_unregister_device(pci_dev); 2102 return; 2103 } 2104 } 2105 2106 /* 2107 * A PCIe Downstream Port that does not have ARI Forwarding enabled must 2108 * associate only Device 0 with the device attached to the bus 2109 * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3, 2110 * sec 7.3.1). 2111 * With ARI, PCI_SLOT() can return a non-zero value, as the traditional 2112 * 5-bit Device Number and 3-bit Function Number fields in its associated 2113 * Routing IDs, Requester IDs and Completer IDs are interpreted as a 2114 * single 8-bit Function Number. Hence, ignore ARI-capable devices.
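 *
 * Illustrative example (hypothetical configuration): plugging a
 * conventional endpoint with "addr=2.0" under a non-ARI-capable
 * pcie-root-port yields PCI_SLOT(devfn) == 2 and triggers the warning
 * below, because only Device 0 is addressable on that Link without
 * ARI Forwarding.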
2115 */ 2116 if (pci_is_express(pci_dev) && 2117 !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) && 2118 pcie_has_upstream_port(pci_dev) && 2119 PCI_SLOT(pci_dev->devfn)) { 2120 warn_report("PCI: slot %d is not valid for %s," 2121 " parent device only allows plugging into slot 0.", 2122 PCI_SLOT(pci_dev->devfn), pci_dev->name); 2123 } 2124 2125 if (pci_dev->failover_pair_id) { 2126 if (!pci_bus_is_express(pci_get_bus(pci_dev))) { 2127 error_setg(errp, "failover primary device must be on " 2128 "PCIExpress bus"); 2129 pci_qdev_unrealize(DEVICE(pci_dev)); 2130 return; 2131 } 2132 class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE); 2133 if (class_id != PCI_CLASS_NETWORK_ETHERNET) { 2134 error_setg(errp, "failover primary device is not an " 2135 "Ethernet device"); 2136 pci_qdev_unrealize(DEVICE(pci_dev)); 2137 return; 2138 } 2139 if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) 2140 || (PCI_FUNC(pci_dev->devfn) != 0)) { 2141 error_setg(errp, "failover: primary device must be in its own " 2142 "PCI slot"); 2143 pci_qdev_unrealize(DEVICE(pci_dev)); 2144 return; 2145 } 2146 qdev->allow_unplug_during_migration = true; 2147 } 2148 2149 /* rom loading */ 2150 is_default_rom = false; 2151 if (pci_dev->romfile == NULL && pc->romfile != NULL) { 2152 pci_dev->romfile = g_strdup(pc->romfile); 2153 is_default_rom = true; 2154 } 2155 2156 pci_add_option_rom(pci_dev, is_default_rom, &local_err); 2157 if (local_err) { 2158 error_propagate(errp, local_err); 2159 pci_qdev_unrealize(DEVICE(pci_dev)); 2160 return; 2161 } 2162 2163 pci_set_power(pci_dev, true); 2164 2165 pci_dev->msi_trigger = pci_msi_trigger; 2166 } 2167 2168 static PCIDevice *pci_new_internal(int devfn, bool multifunction, 2169 const char *name) 2170 { 2171 DeviceState *dev; 2172 2173 dev = qdev_new(name); 2174 qdev_prop_set_int32(dev, "addr", devfn); 2175 qdev_prop_set_bit(dev, "multifunction", multifunction); 2176 return PCI_DEVICE(dev); 2177 } 2178 2179 PCIDevice *pci_new_multifunction(int devfn, const char *name) 2180 { 2181 return pci_new_internal(devfn, true, name); 2182 } 2183 2184 PCIDevice *pci_new(int devfn, const char *name) 2185 { 2186 return pci_new_internal(devfn, false, name); 2187 } 2188 2189 bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp) 2190 { 2191 return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); 2192 } 2193 2194 PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn, 2195 const char *name) 2196 { 2197 PCIDevice *dev = pci_new_multifunction(devfn, name); 2198 pci_realize_and_unref(dev, bus, &error_fatal); 2199 return dev; 2200 } 2201 2202 PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name) 2203 { 2204 PCIDevice *dev = pci_new(devfn, name); 2205 pci_realize_and_unref(dev, bus, &error_fatal); 2206 return dev; 2207 } 2208 2209 static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size) 2210 { 2211 int offset = PCI_CONFIG_HEADER_SIZE; 2212 int i; 2213 for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) { 2214 if (pdev->used[i]) 2215 offset = i + 1; 2216 else if (i - offset + 1 == size) 2217 return offset; 2218 } 2219 return 0; 2220 } 2221 2222 static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id, 2223 uint8_t *prev_p) 2224 { 2225 uint8_t next, prev; 2226 2227 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST)) 2228 return 0; 2229 2230 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2231 prev = next + PCI_CAP_LIST_NEXT) 2232 if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id) 2233 break; 2234 2235 
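    /* At this point, next is the config-space offset of the matching
     * capability, or 0 if the list was exhausted; prev is the offset of
     * the list pointer examined last, which pci_del_capability() uses
     * via prev_p to unlink an entry. */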
if (prev_p) 2236 *prev_p = prev; 2237 return next; 2238 } 2239 2240 static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset) 2241 { 2242 uint8_t next, prev, found = 0; 2243 2244 if (!(pdev->used[offset])) { 2245 return 0; 2246 } 2247 2248 assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST); 2249 2250 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2251 prev = next + PCI_CAP_LIST_NEXT) { 2252 if (next <= offset && next > found) { 2253 found = next; 2254 } 2255 } 2256 return found; 2257 } 2258 2259 /* Patch the PCI vendor and device ids in a PCI rom image if necessary. 2260 This is needed for an option rom which is used for more than one device. */ 2261 static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) 2262 { 2263 uint16_t vendor_id; 2264 uint16_t device_id; 2265 uint16_t rom_vendor_id; 2266 uint16_t rom_device_id; 2267 uint16_t rom_magic; 2268 uint16_t pcir_offset; 2269 uint8_t checksum; 2270 2271 /* Words in rom data are little endian (like in PCI configuration), 2272 so they can be read / written with pci_get_word / pci_set_word. */ 2273 2274 /* Only a valid rom will be patched. */ 2275 rom_magic = pci_get_word(ptr); 2276 if (rom_magic != 0xaa55) { 2277 PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic); 2278 return; 2279 } 2280 pcir_offset = pci_get_word(ptr + 0x18); 2281 if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) { 2282 PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset); 2283 return; 2284 } 2285 2286 vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); 2287 device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); 2288 rom_vendor_id = pci_get_word(ptr + pcir_offset + 4); 2289 rom_device_id = pci_get_word(ptr + pcir_offset + 6); 2290 2291 PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile, 2292 vendor_id, device_id, rom_vendor_id, rom_device_id); 2293 2294 checksum = ptr[6]; 2295 2296 if (vendor_id != rom_vendor_id) { 2297 /* Patch vendor id and checksum (at offset 6 for etherboot roms). */ 2298 checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8); 2299 checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8); 2300 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); 2301 ptr[6] = checksum; 2302 pci_set_word(ptr + pcir_offset + 4, vendor_id); 2303 } 2304 2305 if (device_id != rom_device_id) { 2306 /* Patch device id and checksum (at offset 6 for etherboot roms). */ 2307 checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8); 2308 checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8); 2309 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); 2310 ptr[6] = checksum; 2311 pci_set_word(ptr + pcir_offset + 6, device_id); 2312 } 2313 } 2314 2315 /* Add an option rom for the device */ 2316 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, 2317 Error **errp) 2318 { 2319 int64_t size = 0; 2320 g_autofree char *path = NULL; 2321 char name[32]; 2322 const VMStateDescription *vmsd; 2323 2324 /* 2325 * In case of incoming migration ROM will come with migration stream, no 2326 * reason to load the file. Neither we want to fail if local ROM file 2327 * mismatches with specified romsize. 2328 */ 2329 bool load_file = !runstate_check(RUN_STATE_INMIGRATE); 2330 2331 if (!pdev->romfile || !strlen(pdev->romfile)) { 2332 return; 2333 } 2334 2335 if (!pdev->rom_bar) { 2336 /* 2337 * Load rom via fw_cfg instead of creating a rom bar, 2338 * for 0.11 compatibility. 
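 * (This path is taken when the device is configured with rombar=0,
 * i.e. pdev->rom_bar is 0, as checked above.)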
2339 */ 2340 int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); 2341 2342 /* 2343 * Hot-plugged devices can't use the option ROM 2344 * if the rom bar is disabled. 2345 */ 2346 if (DEVICE(pdev)->hotplugged) { 2347 error_setg(errp, "Hot-plugged device without ROM bar" 2348 " can't have an option ROM"); 2349 return; 2350 } 2351 2352 if (class == 0x0300) { 2353 rom_add_vga(pdev->romfile); 2354 } else { 2355 rom_add_option(pdev->romfile, -1); 2356 } 2357 return; 2358 } 2359 2360 if (load_file || pdev->romsize == UINT32_MAX) { 2361 path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile); 2362 if (path == NULL) { 2363 path = g_strdup(pdev->romfile); 2364 } 2365 2366 size = get_image_size(path); 2367 if (size < 0) { 2368 error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile); 2369 return; 2370 } else if (size == 0) { 2371 error_setg(errp, "romfile \"%s\" is empty", pdev->romfile); 2372 return; 2373 } else if (size > 2 * GiB) { 2374 error_setg(errp, 2375 "romfile \"%s\" too large (size cannot exceed 2 GiB)", 2376 pdev->romfile); 2377 return; 2378 } 2379 if (pdev->romsize != UINT32_MAX) { 2380 if (size > pdev->romsize) { 2381 error_setg(errp, "romfile \"%s\" (%u bytes) " 2382 "is too large for ROM size %u", 2383 pdev->romfile, (uint32_t)size, pdev->romsize); 2384 return; 2385 } 2386 } else { 2387 pdev->romsize = pow2ceil(size); 2388 } 2389 } 2390 2391 vmsd = qdev_get_vmsd(DEVICE(pdev)); 2392 snprintf(name, sizeof(name), "%s.rom", 2393 vmsd ? vmsd->name : object_get_typename(OBJECT(pdev))); 2394 2395 pdev->has_rom = true; 2396 memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, 2397 &error_fatal); 2398 2399 if (load_file) { 2400 void *ptr = memory_region_get_ram_ptr(&pdev->rom); 2401 2402 if (load_image_size(path, ptr, size) < 0) { 2403 error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile); 2404 return; 2405 } 2406 2407 if (is_default_rom) { 2408 /* Only the default rom images will be patched (if needed). */ 2409 pci_patch_ids(pdev, ptr, size); 2410 } 2411 } 2412 2413 pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom); 2414 } 2415 2416 static void pci_del_option_rom(PCIDevice *pdev) 2417 { 2418 if (!pdev->has_rom) 2419 return; 2420 2421 vmstate_unregister_ram(&pdev->rom, &pdev->qdev); 2422 pdev->has_rom = false; 2423 } 2424 2425 /* 2426 * On success, pci_add_capability() returns a positive value 2427 * that is the offset of the PCI capability. 2428 * On failure, it sets an error and returns a negative error 2429 * code. 2430 */ 2431 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id, 2432 uint8_t offset, uint8_t size, 2433 Error **errp) 2434 { 2435 uint8_t *config; 2436 int i, overlapping_cap; 2437 2438 if (!offset) { 2439 offset = pci_find_space(pdev, size); 2440 /* out of PCI config space is programming error */ 2441 assert(offset); 2442 } else { 2443 /* Verify that capabilities don't overlap. Note: device assignment 2444 * depends on this check to verify that the device is not broken. 2445 * Should never trigger for emulated devices, but it's helpful 2446 * for debugging these.
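 * For example, an attempt to add an 8-byte capability at offset 0x60
 * while another capability already occupies bytes 0x64..0x67 makes the
 * loop below report the conflict and return -EINVAL rather than
 * silently overwriting config space (offsets purely illustrative).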
*/ 2447 for (i = offset; i < offset + size; i++) { 2448 overlapping_cap = pci_find_capability_at_offset(pdev, i); 2449 if (overlapping_cap) { 2450 error_setg(errp, "%s:%02x:%02x.%x " 2451 "Attempt to add PCI capability %x at offset " 2452 "%x overlaps existing capability %x at offset %x", 2453 pci_root_bus_path(pdev), pci_dev_bus_num(pdev), 2454 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2455 cap_id, offset, overlapping_cap, i); 2456 return -EINVAL; 2457 } 2458 } 2459 } 2460 2461 config = pdev->config + offset; 2462 config[PCI_CAP_LIST_ID] = cap_id; 2463 config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST]; 2464 pdev->config[PCI_CAPABILITY_LIST] = offset; 2465 pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST; 2466 memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4)); 2467 /* Make capability read-only by default */ 2468 memset(pdev->wmask + offset, 0, size); 2469 /* Check capability by default */ 2470 memset(pdev->cmask + offset, 0xFF, size); 2471 return offset; 2472 } 2473 2474 /* Unlink capability from the pci config space. */ 2475 void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size) 2476 { 2477 uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev); 2478 if (!offset) 2479 return; 2480 pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT]; 2481 /* Make capability writable again */ 2482 memset(pdev->wmask + offset, 0xff, size); 2483 memset(pdev->w1cmask + offset, 0, size); 2484 /* Clear cmask as device-specific registers can't be checked */ 2485 memset(pdev->cmask + offset, 0, size); 2486 memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4)); 2487 2488 if (!pdev->config[PCI_CAPABILITY_LIST]) 2489 pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST; 2490 } 2491 2492 uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id) 2493 { 2494 return pci_find_capability_list(pdev, cap_id, NULL); 2495 } 2496 2497 static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len) 2498 { 2499 PCIDevice *d = (PCIDevice *)dev; 2500 const char *name = NULL; 2501 const pci_class_desc *desc = pci_class_descriptions; 2502 int class = pci_get_word(d->config + PCI_CLASS_DEVICE); 2503 2504 while (desc->desc && 2505 (class & ~desc->fw_ign_bits) != 2506 (desc->class & ~desc->fw_ign_bits)) { 2507 desc++; 2508 } 2509 2510 if (desc->desc) { 2511 name = desc->fw_name; 2512 } 2513 2514 if (name) { 2515 pstrcpy(buf, len, name); 2516 } else { 2517 snprintf(buf, len, "pci%04x,%04x", 2518 pci_get_word(d->config + PCI_VENDOR_ID), 2519 pci_get_word(d->config + PCI_DEVICE_ID)); 2520 } 2521 2522 return buf; 2523 } 2524 2525 static char *pcibus_get_fw_dev_path(DeviceState *dev) 2526 { 2527 PCIDevice *d = (PCIDevice *)dev; 2528 char name[33]; 2529 int has_func = !!PCI_FUNC(d->devfn); 2530 2531 return g_strdup_printf("%s@%x%s%.*x", 2532 pci_dev_fw_name(dev, name, sizeof(name)), 2533 PCI_SLOT(d->devfn), 2534 has_func ? "," : "", 2535 has_func, 2536 PCI_FUNC(d->devfn)); 2537 } 2538 2539 static char *pcibus_get_dev_path(DeviceState *dev) 2540 { 2541 PCIDevice *d = container_of(dev, PCIDevice, qdev); 2542 PCIDevice *t; 2543 int slot_depth; 2544 /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function. 2545 * 00 is added here to make this format compatible with 2546 * domain:Bus:Slot.Func for systems without nested PCI bridges. 2547 * Slot.Function list specifies the slot and function numbers for all 2548 * devices on the path from root to the specific device. 
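 * For instance, assuming the common "0000:00" root bus path, a device at
 * slot 3, function 0 behind a bridge at 00:1e.0 comes out as
 * "0000:00:1e.0:03.0" (values purely illustrative).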
*/ 2549 const char *root_bus_path; 2550 int root_bus_len; 2551 char slot[] = ":SS.F"; 2552 int slot_len = sizeof slot - 1 /* For '\0' */; 2553 int path_len; 2554 char *path, *p; 2555 int s; 2556 2557 root_bus_path = pci_root_bus_path(d); 2558 root_bus_len = strlen(root_bus_path); 2559 2560 /* Calculate # of slots on path between device and root. */; 2561 slot_depth = 0; 2562 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2563 ++slot_depth; 2564 } 2565 2566 path_len = root_bus_len + slot_len * slot_depth; 2567 2568 /* Allocate memory, fill in the terminating null byte. */ 2569 path = g_malloc(path_len + 1 /* For '\0' */); 2570 path[path_len] = '\0'; 2571 2572 memcpy(path, root_bus_path, root_bus_len); 2573 2574 /* Fill in slot numbers. We walk up from device to root, so need to print 2575 * them in the reverse order, last to first. */ 2576 p = path + path_len; 2577 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2578 p -= slot_len; 2579 s = snprintf(slot, sizeof slot, ":%02x.%x", 2580 PCI_SLOT(t->devfn), PCI_FUNC(t->devfn)); 2581 assert(s == slot_len); 2582 memcpy(p, slot, slot_len); 2583 } 2584 2585 return path; 2586 } 2587 2588 static int pci_qdev_find_recursive(PCIBus *bus, 2589 const char *id, PCIDevice **pdev) 2590 { 2591 DeviceState *qdev = qdev_find_recursive(&bus->qbus, id); 2592 if (!qdev) { 2593 return -ENODEV; 2594 } 2595 2596 /* roughly check if given qdev is pci device */ 2597 if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) { 2598 *pdev = PCI_DEVICE(qdev); 2599 return 0; 2600 } 2601 return -EINVAL; 2602 } 2603 2604 int pci_qdev_find_device(const char *id, PCIDevice **pdev) 2605 { 2606 PCIHostState *host_bridge; 2607 int rc = -ENODEV; 2608 2609 QLIST_FOREACH(host_bridge, &pci_host_bridges, next) { 2610 int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev); 2611 if (!tmp) { 2612 rc = 0; 2613 break; 2614 } 2615 if (tmp != -ENODEV) { 2616 rc = tmp; 2617 } 2618 } 2619 2620 return rc; 2621 } 2622 2623 MemoryRegion *pci_address_space(PCIDevice *dev) 2624 { 2625 return pci_get_bus(dev)->address_space_mem; 2626 } 2627 2628 MemoryRegion *pci_address_space_io(PCIDevice *dev) 2629 { 2630 return pci_get_bus(dev)->address_space_io; 2631 } 2632 2633 static void pci_device_class_init(ObjectClass *klass, void *data) 2634 { 2635 DeviceClass *k = DEVICE_CLASS(klass); 2636 2637 k->realize = pci_qdev_realize; 2638 k->unrealize = pci_qdev_unrealize; 2639 k->bus_type = TYPE_PCI_BUS; 2640 device_class_set_props(k, pci_props); 2641 object_class_property_set_description( 2642 klass, "x-max-bounce-buffer-size", 2643 "Maximum buffer size allocated for bounce buffers used for mapped " 2644 "access to indirect DMA memory"); 2645 } 2646 2647 static void pci_device_class_base_init(ObjectClass *klass, void *data) 2648 { 2649 if (!object_class_is_abstract(klass)) { 2650 ObjectClass *conventional = 2651 object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE); 2652 ObjectClass *pcie = 2653 object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE); 2654 ObjectClass *cxl = 2655 object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE); 2656 assert(conventional || pcie || cxl); 2657 } 2658 } 2659 2660 /* 2661 * Get IOMMU root bus, aliased bus and devfn of a PCI device 2662 * 2663 * IOMMU root bus is needed by all call sites to call into iommu_ops. 2664 * For call sites which don't need aliased BDF, passing NULL to 2665 * aliased_[bus|devfn] is allowed. 2666 * 2667 * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device. 
2668 * 2669 * @aliased_bus: return aliased #PCIBus of the PCI device, optional. 2670 * 2671 * @aliased_devfn: return aliased devfn of the PCI device, optional. 2672 */ 2673 static void pci_device_get_iommu_bus_devfn(PCIDevice *dev, 2674 PCIBus **piommu_bus, 2675 PCIBus **aliased_bus, 2676 int *aliased_devfn) 2677 { 2678 PCIBus *bus = pci_get_bus(dev); 2679 PCIBus *iommu_bus = bus; 2680 int devfn = dev->devfn; 2681 2682 while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) { 2683 PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev); 2684 2685 /* 2686 * The requester ID of the provided device may be aliased, as seen from 2687 * the IOMMU, due to topology limitations. The IOMMU relies on a 2688 * requester ID to provide a unique AddressSpace for devices, but 2689 * conventional PCI buses pre-date such concepts. Instead, the PCIe- 2690 * to-PCI bridge creates and accepts transactions on behalf of down- 2691 * stream devices. When doing so, all downstream devices are masked 2692 * (aliased) behind a single requester ID. The requester ID used 2693 * depends on the format of the bridge devices. Proper PCIe-to-PCI 2694 * bridges, with a PCIe capability indicating such, follow the 2695 * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification, 2696 * where the bridge uses the secondary bus as the bridge portion of the 2697 * requester ID and devfn of 00.0. For other bridges, typically those 2698 * found on the root complex such as the dmi-to-pci-bridge, we follow 2699 * the convention of typical bare-metal hardware, which uses the 2700 * requester ID of the bridge itself. There are device-specific 2701 * exceptions to these rules, but these are the defaults that the 2702 * Linux kernel uses when determining DMA aliases itself and are believed 2703 * to be true for the bare metal equivalents of the devices emulated 2704 * in QEMU.
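 *
 * As an illustration: a conventional device behind a proper PCIe-to-PCI
 * bridge whose secondary bus is N is reported to the IOMMU as N:00.0,
 * whereas behind a root-complex bridge such as dmi-to-pci-bridge it
 * aliases to the bridge's own bus and devfn, matching the two branches
 * of the if () below.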
2705 */ 2706 if (!pci_bus_is_express(iommu_bus)) { 2707 PCIDevice *parent = iommu_bus->parent_dev; 2708 2709 if (pci_is_express(parent) && 2710 pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) { 2711 devfn = PCI_DEVFN(0, 0); 2712 bus = iommu_bus; 2713 } else { 2714 devfn = parent->devfn; 2715 bus = parent_bus; 2716 } 2717 } 2718 2719 iommu_bus = parent_bus; 2720 } 2721 2722 assert(0 <= devfn && devfn < PCI_DEVFN_MAX); 2723 assert(iommu_bus); 2724 2725 if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) { 2726 iommu_bus = NULL; 2727 } 2728 2729 *piommu_bus = iommu_bus; 2730 2731 if (aliased_bus) { 2732 *aliased_bus = bus; 2733 } 2734 2735 if (aliased_devfn) { 2736 *aliased_devfn = devfn; 2737 } 2738 } 2739 2740 AddressSpace *pci_device_iommu_address_space(PCIDevice *dev) 2741 { 2742 PCIBus *bus; 2743 PCIBus *iommu_bus; 2744 int devfn; 2745 2746 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 2747 if (iommu_bus) { 2748 return iommu_bus->iommu_ops->get_address_space(bus, 2749 iommu_bus->iommu_opaque, devfn); 2750 } 2751 return &address_space_memory; 2752 } 2753 2754 bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod, 2755 Error **errp) 2756 { 2757 PCIBus *iommu_bus, *aliased_bus; 2758 int aliased_devfn; 2759 2760 /* set_iommu_device requires device's direct BDF instead of aliased BDF */ 2761 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, 2762 &aliased_bus, &aliased_devfn); 2763 if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) { 2764 hiod->aliased_bus = aliased_bus; 2765 hiod->aliased_devfn = aliased_devfn; 2766 return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev), 2767 iommu_bus->iommu_opaque, 2768 dev->devfn, hiod, errp); 2769 } 2770 return true; 2771 } 2772 2773 void pci_device_unset_iommu_device(PCIDevice *dev) 2774 { 2775 PCIBus *iommu_bus; 2776 2777 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL); 2778 if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) { 2779 return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev), 2780 iommu_bus->iommu_opaque, 2781 dev->devfn); 2782 } 2783 } 2784 2785 void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque) 2786 { 2787 /* 2788 * If called, pci_setup_iommu() should provide a minimum set of 2789 * useful callbacks for the bus. 
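 * get_address_space is mandatory (asserted below); other hooks such as
 * set_iommu_device/unset_iommu_device are optional, and the callers
 * above check for them before use.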
2790 */ 2791 assert(ops); 2792 assert(ops->get_address_space); 2793 2794 bus->iommu_ops = ops; 2795 bus->iommu_opaque = opaque; 2796 } 2797 2798 static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque) 2799 { 2800 Range *range = opaque; 2801 uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND); 2802 int i; 2803 2804 if (!(cmd & PCI_COMMAND_MEMORY)) { 2805 return; 2806 } 2807 2808 if (IS_PCI_BRIDGE(dev)) { 2809 pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); 2810 pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); 2811 2812 base = MAX(base, 0x1ULL << 32); 2813 2814 if (limit >= base) { 2815 Range pref_range; 2816 range_set_bounds(&pref_range, base, limit); 2817 range_extend(range, &pref_range); 2818 } 2819 } 2820 for (i = 0; i < PCI_NUM_REGIONS; ++i) { 2821 PCIIORegion *r = &dev->io_regions[i]; 2822 pcibus_t lob, upb; 2823 Range region_range; 2824 2825 if (!r->size || 2826 (r->type & PCI_BASE_ADDRESS_SPACE_IO) || 2827 !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) { 2828 continue; 2829 } 2830 2831 lob = pci_bar_address(dev, i, r->type, r->size); 2832 upb = lob + r->size - 1; 2833 if (lob == PCI_BAR_UNMAPPED) { 2834 continue; 2835 } 2836 2837 lob = MAX(lob, 0x1ULL << 32); 2838 2839 if (upb >= lob) { 2840 range_set_bounds(®ion_range, lob, upb); 2841 range_extend(range, ®ion_range); 2842 } 2843 } 2844 } 2845 2846 void pci_bus_get_w64_range(PCIBus *bus, Range *range) 2847 { 2848 range_make_empty(range); 2849 pci_for_each_device_under_bus(bus, pci_dev_get_w64, range); 2850 } 2851 2852 static bool pcie_has_upstream_port(PCIDevice *dev) 2853 { 2854 PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev)); 2855 2856 /* Device associated with an upstream port. 2857 * As there are several types of these, it's easier to check the 2858 * parent device: upstream ports are always connected to 2859 * root or downstream ports. 
2860 */ 2861 return parent_dev && 2862 pci_is_express(parent_dev) && 2863 parent_dev->exp.exp_cap && 2864 (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT || 2865 pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM); 2866 } 2867 2868 PCIDevice *pci_get_function_0(PCIDevice *pci_dev) 2869 { 2870 PCIBus *bus = pci_get_bus(pci_dev); 2871 2872 if(pcie_has_upstream_port(pci_dev)) { 2873 /* With an upstream PCIe port, we only support 1 device at slot 0 */ 2874 return bus->devices[0]; 2875 } else { 2876 /* Other bus types might support multiple devices at slots 0-31 */ 2877 return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)]; 2878 } 2879 } 2880 2881 MSIMessage pci_get_msi_message(PCIDevice *dev, int vector) 2882 { 2883 MSIMessage msg; 2884 if (msix_enabled(dev)) { 2885 msg = msix_get_message(dev, vector); 2886 } else if (msi_enabled(dev)) { 2887 msg = msi_get_message(dev, vector); 2888 } else { 2889 /* Should never happen */ 2890 error_report("%s: unknown interrupt type", __func__); 2891 abort(); 2892 } 2893 return msg; 2894 } 2895 2896 void pci_set_power(PCIDevice *d, bool state) 2897 { 2898 if (d->has_power == state) { 2899 return; 2900 } 2901 2902 d->has_power = state; 2903 pci_update_mappings(d); 2904 memory_region_set_enabled(&d->bus_master_enable_region, 2905 (pci_get_word(d->config + PCI_COMMAND) 2906 & PCI_COMMAND_MASTER) && d->has_power); 2907 if (!d->has_power) { 2908 pci_device_reset(d); 2909 } 2910 } 2911 2912 static const TypeInfo pci_device_type_info = { 2913 .name = TYPE_PCI_DEVICE, 2914 .parent = TYPE_DEVICE, 2915 .instance_size = sizeof(PCIDevice), 2916 .abstract = true, 2917 .class_size = sizeof(PCIDeviceClass), 2918 .class_init = pci_device_class_init, 2919 .class_base_init = pci_device_class_base_init, 2920 }; 2921 2922 static void pci_register_types(void) 2923 { 2924 type_register_static(&pci_bus_info); 2925 type_register_static(&pcie_bus_info); 2926 type_register_static(&cxl_bus_info); 2927 type_register_static(&conventional_pci_interface_info); 2928 type_register_static(&cxl_interface_info); 2929 type_register_static(&pcie_interface_info); 2930 type_register_static(&pci_device_type_info); 2931 } 2932 2933 type_init(pci_register_types) 2934