1 /* 2 * QEMU PCI bus manager 3 * 4 * Copyright (c) 2004 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "qemu/datadir.h" 27 #include "qemu/units.h" 28 #include "hw/irq.h" 29 #include "hw/pci/pci.h" 30 #include "hw/pci/pci_bridge.h" 31 #include "hw/pci/pci_bus.h" 32 #include "hw/pci/pci_host.h" 33 #include "hw/qdev-properties.h" 34 #include "hw/qdev-properties-system.h" 35 #include "migration/qemu-file-types.h" 36 #include "migration/vmstate.h" 37 #include "net/net.h" 38 #include "sysemu/numa.h" 39 #include "sysemu/runstate.h" 40 #include "sysemu/sysemu.h" 41 #include "hw/loader.h" 42 #include "qemu/error-report.h" 43 #include "qemu/range.h" 44 #include "trace.h" 45 #include "hw/pci/msi.h" 46 #include "hw/pci/msix.h" 47 #include "hw/hotplug.h" 48 #include "hw/boards.h" 49 #include "qapi/error.h" 50 #include "qemu/cutils.h" 51 #include "pci-internal.h" 52 53 #include "hw/xen/xen.h" 54 #include "hw/i386/kvm/xen_evtchn.h" 55 56 //#define DEBUG_PCI 57 #ifdef DEBUG_PCI 58 # define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__) 59 #else 60 # define PCI_DPRINTF(format, ...) do { } while (0) 61 #endif 62 63 bool pci_available = true; 64 65 static char *pcibus_get_dev_path(DeviceState *dev); 66 static char *pcibus_get_fw_dev_path(DeviceState *dev); 67 static void pcibus_reset_hold(Object *obj, ResetType type); 68 static bool pcie_has_upstream_port(PCIDevice *dev); 69 70 static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name, 71 void *opaque, Error **errp) 72 { 73 uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj)); 74 75 visit_type_uint8(v, name, &busnr, errp); 76 } 77 78 static const PropertyInfo prop_pci_busnr = { 79 .name = "busnr", 80 .get = prop_pci_busnr_get, 81 }; 82 83 static Property pci_props[] = { 84 DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1), 85 DEFINE_PROP_STRING("romfile", PCIDevice, romfile), 86 DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX), 87 DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1), 88 DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present, 89 QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false), 90 DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present, 91 QEMU_PCIE_LNKSTA_DLLLA_BITNR, true), 92 DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present, 93 QEMU_PCIE_EXTCAP_INIT_BITNR, true), 94 DEFINE_PROP_STRING("failover_pair_id", PCIDevice, 95 failover_pair_id), 96 DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), 97 DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present, 98 QEMU_PCIE_ERR_UNC_MASK_BITNR, true), 99 DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present, 100 QEMU_PCIE_ARI_NEXTFN_1_BITNR, false), 101 DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice, 102 max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE), 103 { .name = "busnr", .info = &prop_pci_busnr }, 104 DEFINE_PROP_END_OF_LIST() 105 }; 106 107 static const VMStateDescription vmstate_pcibus = { 108 .name = "PCIBUS", 109 .version_id = 1, 110 .minimum_version_id = 1, 111 .fields = (const VMStateField[]) { 112 VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL), 113 VMSTATE_VARRAY_INT32(irq_count, PCIBus, 114 nirq, 0, vmstate_info_int32, 115 int32_t), 116 VMSTATE_END_OF_LIST() 117 } 118 }; 119 120 static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data) 121 { 122 return a - b; 123 } 124 125 static GSequence *pci_acpi_index_list(void) 126 { 127 static GSequence *used_acpi_index_list; 128 129 if (!used_acpi_index_list) { 130 used_acpi_index_list = g_sequence_new(NULL); 131 } 132 return used_acpi_index_list; 133 } 134 135 static void pci_init_bus_master(PCIDevice *pci_dev) 136 { 137 AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev); 138 139 memory_region_init_alias(&pci_dev->bus_master_enable_region, 140 OBJECT(pci_dev), "bus master", 141 dma_as->root, 0, memory_region_size(dma_as->root)); 142 memory_region_set_enabled(&pci_dev->bus_master_enable_region, false); 143 memory_region_add_subregion(&pci_dev->bus_master_container_region, 0, 144 &pci_dev->bus_master_enable_region); 145 } 146 147 static void pcibus_machine_done(Notifier *notifier, void *data) 148 { 149 PCIBus *bus = container_of(notifier, PCIBus, machine_done); 150 int i; 151 152 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 153 if (bus->devices[i]) { 154 pci_init_bus_master(bus->devices[i]); 155 } 156 } 157 } 158 159 static void pci_bus_realize(BusState *qbus, Error **errp) 160 { 161 PCIBus *bus = PCI_BUS(qbus); 162 163 bus->machine_done.notify = pcibus_machine_done; 164 qemu_add_machine_init_done_notifier(&bus->machine_done); 165 166 vmstate_register_any(NULL, &vmstate_pcibus, bus); 167 } 168 169 static void pcie_bus_realize(BusState *qbus, Error **errp) 170 { 171 PCIBus *bus = PCI_BUS(qbus); 172 Error *local_err = NULL; 173 174 pci_bus_realize(qbus, &local_err); 175 if (local_err) { 176 error_propagate(errp, local_err); 177 return; 178 } 179 180 /* 181 * A PCI-E bus can support extended config space if it's the root 182 * bus, or if the bus/bridge above it does as well 183 */ 184 if (pci_bus_is_root(bus)) { 185 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; 186 } else { 187 PCIBus *parent_bus = pci_get_bus(bus->parent_dev); 188 189 if (pci_bus_allows_extended_config_space(parent_bus)) { 190 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; 191 } 192 } 193 } 194 195 static void pci_bus_unrealize(BusState *qbus) 196 { 197 PCIBus *bus = PCI_BUS(qbus); 198 199 qemu_remove_machine_init_done_notifier(&bus->machine_done); 200 201 vmstate_unregister(NULL, &vmstate_pcibus, bus); 202 } 203 204 static int pcibus_num(PCIBus *bus) 205 { 206 if (pci_bus_is_root(bus)) { 207 return 0; /* pci host bridge */ 208 } 209 return bus->parent_dev->config[PCI_SECONDARY_BUS]; 210 } 211 212 static uint16_t pcibus_numa_node(PCIBus *bus) 213 { 214 return NUMA_NODE_UNASSIGNED; 215 } 216 217 static void pci_bus_class_init(ObjectClass *klass, void *data) 218 { 219 BusClass *k = BUS_CLASS(klass); 220 PCIBusClass *pbc = PCI_BUS_CLASS(klass); 221 ResettableClass *rc = RESETTABLE_CLASS(klass); 222 223 k->print_dev = pcibus_dev_print; 224 k->get_dev_path = pcibus_get_dev_path; 225 k->get_fw_dev_path = pcibus_get_fw_dev_path; 226 k->realize = pci_bus_realize; 227 k->unrealize = pci_bus_unrealize; 228 229 rc->phases.hold = pcibus_reset_hold; 230 231 pbc->bus_num = pcibus_num; 232 pbc->numa_node = pcibus_numa_node; 233 } 234 235 static const TypeInfo pci_bus_info = { 236 .name = TYPE_PCI_BUS, 237 .parent = TYPE_BUS, 238 .instance_size = sizeof(PCIBus), 239 .class_size = sizeof(PCIBusClass), 240 .class_init = pci_bus_class_init, 241 }; 242 243 static const TypeInfo cxl_interface_info = { 244 .name = INTERFACE_CXL_DEVICE, 245 .parent = TYPE_INTERFACE, 246 }; 247 248 static const TypeInfo pcie_interface_info = { 249 .name = INTERFACE_PCIE_DEVICE, 250 .parent = TYPE_INTERFACE, 251 }; 252 253 static const TypeInfo conventional_pci_interface_info = { 254 .name = INTERFACE_CONVENTIONAL_PCI_DEVICE, 255 .parent = TYPE_INTERFACE, 256 }; 257 258 static void pcie_bus_class_init(ObjectClass *klass, void *data) 259 { 260 BusClass *k = BUS_CLASS(klass); 261 262 k->realize = pcie_bus_realize; 263 } 264 265 static const TypeInfo pcie_bus_info = { 266 .name = TYPE_PCIE_BUS, 267 .parent = TYPE_PCI_BUS, 268 .class_init = pcie_bus_class_init, 269 }; 270 271 static const TypeInfo cxl_bus_info = { 272 .name = TYPE_CXL_BUS, 273 .parent = TYPE_PCIE_BUS, 274 .class_init = pcie_bus_class_init, 275 }; 276 277 static void pci_update_mappings(PCIDevice *d); 278 static void pci_irq_handler(void *opaque, int irq_num, int level); 279 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **); 280 static void pci_del_option_rom(PCIDevice *pdev); 281 282 static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET; 283 static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU; 284 285 PCIHostStateList pci_host_bridges; 286 287 int pci_bar(PCIDevice *d, int reg) 288 { 289 uint8_t type; 290 291 /* PCIe virtual functions do not have their own BARs */ 292 assert(!pci_is_vf(d)); 293 294 if (reg != PCI_ROM_SLOT) 295 return PCI_BASE_ADDRESS_0 + reg * 4; 296 297 type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; 298 return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS; 299 } 300 301 static inline int pci_irq_state(PCIDevice *d, int irq_num) 302 { 303 return (d->irq_state >> irq_num) & 0x1; 304 } 305 306 static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level) 307 { 308 d->irq_state &= ~(0x1 << irq_num); 309 d->irq_state |= level << irq_num; 310 } 311 312 static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change) 313 { 314 assert(irq_num >= 0); 315 assert(irq_num < bus->nirq); 316 bus->irq_count[irq_num] += change; 317 bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0); 318 } 319 320 static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change) 321 { 322 PCIBus *bus; 323 for (;;) { 324 int dev_irq = irq_num; 325 bus = pci_get_bus(pci_dev); 326 assert(bus->map_irq); 327 irq_num = bus->map_irq(pci_dev, irq_num); 328 trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num, 329 pci_bus_is_root(bus) ? "root-complex" 330 : DEVICE(bus->parent_dev)->canonical_path); 331 if (bus->set_irq) 332 break; 333 pci_dev = bus->parent_dev; 334 } 335 pci_bus_change_irq_level(bus, irq_num, change); 336 } 337 338 int pci_bus_get_irq_level(PCIBus *bus, int irq_num) 339 { 340 assert(irq_num >= 0); 341 assert(irq_num < bus->nirq); 342 return !!bus->irq_count[irq_num]; 343 } 344 345 /* Update interrupt status bit in config space on interrupt 346 * state change. */ 347 static void pci_update_irq_status(PCIDevice *dev) 348 { 349 if (dev->irq_state) { 350 dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT; 351 } else { 352 dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; 353 } 354 } 355 356 void pci_device_deassert_intx(PCIDevice *dev) 357 { 358 int i; 359 for (i = 0; i < PCI_NUM_PINS; ++i) { 360 pci_irq_handler(dev, i, 0); 361 } 362 } 363 364 static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg) 365 { 366 MemTxAttrs attrs = {}; 367 368 /* 369 * Xen uses the high bits of the address to contain some of the bits 370 * of the PIRQ#. Therefore we can't just send the write cycle and 371 * trust that it's caught by the APIC at 0xfee00000 because the 372 * target of the write might be e.g. 0x0x1000fee46000 for PIRQ#4166. 373 * So we intercept the delivery here instead of in kvm_send_msi(). 374 */ 375 if (xen_mode == XEN_EMULATE && 376 xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) { 377 return; 378 } 379 attrs.requester_id = pci_requester_id(dev); 380 address_space_stl_le(&dev->bus_master_as, msg.address, msg.data, 381 attrs, NULL); 382 } 383 384 static void pci_reset_regions(PCIDevice *dev) 385 { 386 int r; 387 if (pci_is_vf(dev)) { 388 return; 389 } 390 391 for (r = 0; r < PCI_NUM_REGIONS; ++r) { 392 PCIIORegion *region = &dev->io_regions[r]; 393 if (!region->size) { 394 continue; 395 } 396 397 if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) && 398 region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 399 pci_set_quad(dev->config + pci_bar(dev, r), region->type); 400 } else { 401 pci_set_long(dev->config + pci_bar(dev, r), region->type); 402 } 403 } 404 } 405 406 static void pci_do_device_reset(PCIDevice *dev) 407 { 408 pci_device_deassert_intx(dev); 409 assert(dev->irq_state == 0); 410 411 /* Clear all writable bits */ 412 pci_word_test_and_clear_mask(dev->config + PCI_COMMAND, 413 pci_get_word(dev->wmask + PCI_COMMAND) | 414 pci_get_word(dev->w1cmask + PCI_COMMAND)); 415 pci_word_test_and_clear_mask(dev->config + PCI_STATUS, 416 pci_get_word(dev->wmask + PCI_STATUS) | 417 pci_get_word(dev->w1cmask + PCI_STATUS)); 418 /* Some devices make bits of PCI_INTERRUPT_LINE read only */ 419 pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE, 420 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) | 421 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE)); 422 dev->config[PCI_CACHE_LINE_SIZE] = 0x0; 423 pci_reset_regions(dev); 424 pci_update_mappings(dev); 425 426 msi_reset(dev); 427 msix_reset(dev); 428 pcie_sriov_pf_reset(dev); 429 } 430 431 /* 432 * This function is called on #RST and FLR. 433 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set 434 */ 435 void pci_device_reset(PCIDevice *dev) 436 { 437 device_cold_reset(&dev->qdev); 438 pci_do_device_reset(dev); 439 } 440 441 /* 442 * Trigger pci bus reset under a given bus. 443 * Called via bus_cold_reset on RST# assert, after the devices 444 * have been reset device_cold_reset-ed already. 445 */ 446 static void pcibus_reset_hold(Object *obj, ResetType type) 447 { 448 PCIBus *bus = PCI_BUS(obj); 449 int i; 450 451 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 452 if (bus->devices[i]) { 453 pci_do_device_reset(bus->devices[i]); 454 } 455 } 456 457 for (i = 0; i < bus->nirq; i++) { 458 assert(bus->irq_count[i] == 0); 459 } 460 } 461 462 static void pci_host_bus_register(DeviceState *host) 463 { 464 PCIHostState *host_bridge = PCI_HOST_BRIDGE(host); 465 466 QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next); 467 } 468 469 static void pci_host_bus_unregister(DeviceState *host) 470 { 471 PCIHostState *host_bridge = PCI_HOST_BRIDGE(host); 472 473 QLIST_REMOVE(host_bridge, next); 474 } 475 476 PCIBus *pci_device_root_bus(const PCIDevice *d) 477 { 478 PCIBus *bus = pci_get_bus(d); 479 480 while (!pci_bus_is_root(bus)) { 481 d = bus->parent_dev; 482 assert(d != NULL); 483 484 bus = pci_get_bus(d); 485 } 486 487 return bus; 488 } 489 490 const char *pci_root_bus_path(PCIDevice *dev) 491 { 492 PCIBus *rootbus = pci_device_root_bus(dev); 493 PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent); 494 PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge); 495 496 assert(host_bridge->bus == rootbus); 497 498 if (hc->root_bus_path) { 499 return (*hc->root_bus_path)(host_bridge, rootbus); 500 } 501 502 return rootbus->qbus.name; 503 } 504 505 bool pci_bus_bypass_iommu(PCIBus *bus) 506 { 507 PCIBus *rootbus = bus; 508 PCIHostState *host_bridge; 509 510 if (!pci_bus_is_root(bus)) { 511 rootbus = pci_device_root_bus(bus->parent_dev); 512 } 513 514 host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent); 515 516 assert(host_bridge->bus == rootbus); 517 518 return host_bridge->bypass_iommu; 519 } 520 521 static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent, 522 MemoryRegion *mem, MemoryRegion *io, 523 uint8_t devfn_min) 524 { 525 assert(PCI_FUNC(devfn_min) == 0); 526 bus->devfn_min = devfn_min; 527 bus->slot_reserved_mask = 0x0; 528 bus->address_space_mem = mem; 529 bus->address_space_io = io; 530 bus->flags |= PCI_BUS_IS_ROOT; 531 532 /* host bridge */ 533 QLIST_INIT(&bus->child); 534 535 pci_host_bus_register(parent); 536 } 537 538 static void pci_bus_uninit(PCIBus *bus) 539 { 540 pci_host_bus_unregister(BUS(bus)->parent); 541 } 542 543 bool pci_bus_is_express(const PCIBus *bus) 544 { 545 return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS); 546 } 547 548 void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent, 549 const char *name, 550 MemoryRegion *mem, MemoryRegion *io, 551 uint8_t devfn_min, const char *typename) 552 { 553 qbus_init(bus, bus_size, typename, parent, name); 554 pci_root_bus_internal_init(bus, parent, mem, io, devfn_min); 555 } 556 557 PCIBus *pci_root_bus_new(DeviceState *parent, const char *name, 558 MemoryRegion *mem, MemoryRegion *io, 559 uint8_t devfn_min, const char *typename) 560 { 561 PCIBus *bus; 562 563 bus = PCI_BUS(qbus_new(typename, parent, name)); 564 pci_root_bus_internal_init(bus, parent, mem, io, devfn_min); 565 return bus; 566 } 567 568 void pci_root_bus_cleanup(PCIBus *bus) 569 { 570 pci_bus_uninit(bus); 571 /* the caller of the unplug hotplug handler will delete this device */ 572 qbus_unrealize(BUS(bus)); 573 } 574 575 void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, 576 void *irq_opaque, int nirq) 577 { 578 bus->set_irq = set_irq; 579 bus->irq_opaque = irq_opaque; 580 bus->nirq = nirq; 581 g_free(bus->irq_count); 582 bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0])); 583 } 584 585 void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq) 586 { 587 bus->map_irq = map_irq; 588 } 589 590 void pci_bus_irqs_cleanup(PCIBus *bus) 591 { 592 bus->set_irq = NULL; 593 bus->map_irq = NULL; 594 bus->irq_opaque = NULL; 595 bus->nirq = 0; 596 g_free(bus->irq_count); 597 bus->irq_count = NULL; 598 } 599 600 PCIBus *pci_register_root_bus(DeviceState *parent, const char *name, 601 pci_set_irq_fn set_irq, pci_map_irq_fn map_irq, 602 void *irq_opaque, 603 MemoryRegion *mem, MemoryRegion *io, 604 uint8_t devfn_min, int nirq, 605 const char *typename) 606 { 607 PCIBus *bus; 608 609 bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename); 610 pci_bus_irqs(bus, set_irq, irq_opaque, nirq); 611 pci_bus_map_irqs(bus, map_irq); 612 return bus; 613 } 614 615 void pci_unregister_root_bus(PCIBus *bus) 616 { 617 pci_bus_irqs_cleanup(bus); 618 pci_root_bus_cleanup(bus); 619 } 620 621 int pci_bus_num(PCIBus *s) 622 { 623 return PCI_BUS_GET_CLASS(s)->bus_num(s); 624 } 625 626 /* Returns the min and max bus numbers of a PCI bus hierarchy */ 627 void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus) 628 { 629 int i; 630 *min_bus = *max_bus = pci_bus_num(bus); 631 632 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 633 PCIDevice *dev = bus->devices[i]; 634 635 if (dev && IS_PCI_BRIDGE(dev)) { 636 *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]); 637 *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]); 638 } 639 } 640 } 641 642 int pci_bus_numa_node(PCIBus *bus) 643 { 644 return PCI_BUS_GET_CLASS(bus)->numa_node(bus); 645 } 646 647 static int get_pci_config_device(QEMUFile *f, void *pv, size_t size, 648 const VMStateField *field) 649 { 650 PCIDevice *s = container_of(pv, PCIDevice, config); 651 uint8_t *config; 652 int i; 653 654 assert(size == pci_config_size(s)); 655 config = g_malloc(size); 656 657 qemu_get_buffer(f, config, size); 658 for (i = 0; i < size; ++i) { 659 if ((config[i] ^ s->config[i]) & 660 s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) { 661 error_report("%s: Bad config data: i=0x%x read: %x device: %x " 662 "cmask: %x wmask: %x w1cmask:%x", __func__, 663 i, config[i], s->config[i], 664 s->cmask[i], s->wmask[i], s->w1cmask[i]); 665 g_free(config); 666 return -EINVAL; 667 } 668 } 669 memcpy(s->config, config, size); 670 671 pci_update_mappings(s); 672 if (IS_PCI_BRIDGE(s)) { 673 pci_bridge_update_mappings(PCI_BRIDGE(s)); 674 } 675 676 memory_region_set_enabled(&s->bus_master_enable_region, 677 pci_get_word(s->config + PCI_COMMAND) 678 & PCI_COMMAND_MASTER); 679 680 g_free(config); 681 return 0; 682 } 683 684 /* just put buffer */ 685 static int put_pci_config_device(QEMUFile *f, void *pv, size_t size, 686 const VMStateField *field, JSONWriter *vmdesc) 687 { 688 const uint8_t **v = pv; 689 assert(size == pci_config_size(container_of(pv, PCIDevice, config))); 690 qemu_put_buffer(f, *v, size); 691 692 return 0; 693 } 694 695 static const VMStateInfo vmstate_info_pci_config = { 696 .name = "pci config", 697 .get = get_pci_config_device, 698 .put = put_pci_config_device, 699 }; 700 701 static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size, 702 const VMStateField *field) 703 { 704 PCIDevice *s = container_of(pv, PCIDevice, irq_state); 705 uint32_t irq_state[PCI_NUM_PINS]; 706 int i; 707 for (i = 0; i < PCI_NUM_PINS; ++i) { 708 irq_state[i] = qemu_get_be32(f); 709 if (irq_state[i] != 0x1 && irq_state[i] != 0) { 710 fprintf(stderr, "irq state %d: must be 0 or 1.\n", 711 irq_state[i]); 712 return -EINVAL; 713 } 714 } 715 716 for (i = 0; i < PCI_NUM_PINS; ++i) { 717 pci_set_irq_state(s, i, irq_state[i]); 718 } 719 720 return 0; 721 } 722 723 static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size, 724 const VMStateField *field, JSONWriter *vmdesc) 725 { 726 int i; 727 PCIDevice *s = container_of(pv, PCIDevice, irq_state); 728 729 for (i = 0; i < PCI_NUM_PINS; ++i) { 730 qemu_put_be32(f, pci_irq_state(s, i)); 731 } 732 733 return 0; 734 } 735 736 static const VMStateInfo vmstate_info_pci_irq_state = { 737 .name = "pci irq state", 738 .get = get_pci_irq_state, 739 .put = put_pci_irq_state, 740 }; 741 742 static bool migrate_is_pcie(void *opaque, int version_id) 743 { 744 return pci_is_express((PCIDevice *)opaque); 745 } 746 747 static bool migrate_is_not_pcie(void *opaque, int version_id) 748 { 749 return !pci_is_express((PCIDevice *)opaque); 750 } 751 752 const VMStateDescription vmstate_pci_device = { 753 .name = "PCIDevice", 754 .version_id = 2, 755 .minimum_version_id = 1, 756 .fields = (const VMStateField[]) { 757 VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice), 758 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, 759 migrate_is_not_pcie, 760 0, vmstate_info_pci_config, 761 PCI_CONFIG_SPACE_SIZE), 762 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, 763 migrate_is_pcie, 764 0, vmstate_info_pci_config, 765 PCIE_CONFIG_SPACE_SIZE), 766 VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2, 767 vmstate_info_pci_irq_state, 768 PCI_NUM_PINS * sizeof(int32_t)), 769 VMSTATE_END_OF_LIST() 770 } 771 }; 772 773 774 void pci_device_save(PCIDevice *s, QEMUFile *f) 775 { 776 /* Clear interrupt status bit: it is implicit 777 * in irq_state which we are saving. 778 * This makes us compatible with old devices 779 * which never set or clear this bit. */ 780 s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; 781 vmstate_save_state(f, &vmstate_pci_device, s, NULL); 782 /* Restore the interrupt status bit. */ 783 pci_update_irq_status(s); 784 } 785 786 int pci_device_load(PCIDevice *s, QEMUFile *f) 787 { 788 int ret; 789 ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id); 790 /* Restore the interrupt status bit. */ 791 pci_update_irq_status(s); 792 return ret; 793 } 794 795 static void pci_set_default_subsystem_id(PCIDevice *pci_dev) 796 { 797 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, 798 pci_default_sub_vendor_id); 799 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 800 pci_default_sub_device_id); 801 } 802 803 /* 804 * Parse [[<domain>:]<bus>:]<slot>, return -1 on error if funcp == NULL 805 * [[<domain>:]<bus>:]<slot>.<func>, return -1 on error 806 */ 807 static int pci_parse_devaddr(const char *addr, int *domp, int *busp, 808 unsigned int *slotp, unsigned int *funcp) 809 { 810 const char *p; 811 char *e; 812 unsigned long val; 813 unsigned long dom = 0, bus = 0; 814 unsigned int slot = 0; 815 unsigned int func = 0; 816 817 p = addr; 818 val = strtoul(p, &e, 16); 819 if (e == p) 820 return -1; 821 if (*e == ':') { 822 bus = val; 823 p = e + 1; 824 val = strtoul(p, &e, 16); 825 if (e == p) 826 return -1; 827 if (*e == ':') { 828 dom = bus; 829 bus = val; 830 p = e + 1; 831 val = strtoul(p, &e, 16); 832 if (e == p) 833 return -1; 834 } 835 } 836 837 slot = val; 838 839 if (funcp != NULL) { 840 if (*e != '.') 841 return -1; 842 843 p = e + 1; 844 val = strtoul(p, &e, 16); 845 if (e == p) 846 return -1; 847 848 func = val; 849 } 850 851 /* if funcp == NULL func is 0 */ 852 if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7) 853 return -1; 854 855 if (*e) 856 return -1; 857 858 *domp = dom; 859 *busp = bus; 860 *slotp = slot; 861 if (funcp != NULL) 862 *funcp = func; 863 return 0; 864 } 865 866 static void pci_init_cmask(PCIDevice *dev) 867 { 868 pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff); 869 pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff); 870 dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST; 871 dev->cmask[PCI_REVISION_ID] = 0xff; 872 dev->cmask[PCI_CLASS_PROG] = 0xff; 873 pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff); 874 dev->cmask[PCI_HEADER_TYPE] = 0xff; 875 dev->cmask[PCI_CAPABILITY_LIST] = 0xff; 876 } 877 878 static void pci_init_wmask(PCIDevice *dev) 879 { 880 int config_size = pci_config_size(dev); 881 882 dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff; 883 dev->wmask[PCI_INTERRUPT_LINE] = 0xff; 884 pci_set_word(dev->wmask + PCI_COMMAND, 885 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | 886 PCI_COMMAND_INTX_DISABLE); 887 pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR); 888 889 memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff, 890 config_size - PCI_CONFIG_HEADER_SIZE); 891 } 892 893 static void pci_init_w1cmask(PCIDevice *dev) 894 { 895 /* 896 * Note: It's okay to set w1cmask even for readonly bits as 897 * long as their value is hardwired to 0. 898 */ 899 pci_set_word(dev->w1cmask + PCI_STATUS, 900 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | 901 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | 902 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY); 903 } 904 905 static void pci_init_mask_bridge(PCIDevice *d) 906 { 907 /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and 908 PCI_SEC_LATENCY_TIMER */ 909 memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4); 910 911 /* base and limit */ 912 d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff; 913 d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff; 914 pci_set_word(d->wmask + PCI_MEMORY_BASE, 915 PCI_MEMORY_RANGE_MASK & 0xffff); 916 pci_set_word(d->wmask + PCI_MEMORY_LIMIT, 917 PCI_MEMORY_RANGE_MASK & 0xffff); 918 pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE, 919 PCI_PREF_RANGE_MASK & 0xffff); 920 pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT, 921 PCI_PREF_RANGE_MASK & 0xffff); 922 923 /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */ 924 memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8); 925 926 /* Supported memory and i/o types */ 927 d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16; 928 d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16; 929 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE, 930 PCI_PREF_RANGE_TYPE_64); 931 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT, 932 PCI_PREF_RANGE_TYPE_64); 933 934 /* 935 * TODO: Bridges default to 10-bit VGA decoding but we currently only 936 * implement 16-bit decoding (no alias support). 937 */ 938 pci_set_word(d->wmask + PCI_BRIDGE_CONTROL, 939 PCI_BRIDGE_CTL_PARITY | 940 PCI_BRIDGE_CTL_SERR | 941 PCI_BRIDGE_CTL_ISA | 942 PCI_BRIDGE_CTL_VGA | 943 PCI_BRIDGE_CTL_VGA_16BIT | 944 PCI_BRIDGE_CTL_MASTER_ABORT | 945 PCI_BRIDGE_CTL_BUS_RESET | 946 PCI_BRIDGE_CTL_FAST_BACK | 947 PCI_BRIDGE_CTL_DISCARD | 948 PCI_BRIDGE_CTL_SEC_DISCARD | 949 PCI_BRIDGE_CTL_DISCARD_SERR); 950 /* Below does not do anything as we never set this bit, put here for 951 * completeness. */ 952 pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL, 953 PCI_BRIDGE_CTL_DISCARD_STATUS); 954 d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK; 955 d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK; 956 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE, 957 PCI_PREF_RANGE_TYPE_MASK); 958 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT, 959 PCI_PREF_RANGE_TYPE_MASK); 960 } 961 962 static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp) 963 { 964 uint8_t slot = PCI_SLOT(dev->devfn); 965 uint8_t func; 966 967 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 968 dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; 969 } 970 971 /* 972 * With SR/IOV and ARI, a device at function 0 need not be a multifunction 973 * device, as it may just be a VF that ended up with function 0 in 974 * the legacy PCI interpretation. Avoid failing in such cases: 975 */ 976 if (pci_is_vf(dev) && 977 dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 978 return; 979 } 980 981 /* 982 * multifunction bit is interpreted in two ways as follows. 983 * - all functions must set the bit to 1. 984 * Example: Intel X53 985 * - function 0 must set the bit, but the rest function (> 0) 986 * is allowed to leave the bit to 0. 987 * Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10, 988 * 989 * So OS (at least Linux) checks the bit of only function 0, 990 * and doesn't see the bit of function > 0. 991 * 992 * The below check allows both interpretation. 993 */ 994 if (PCI_FUNC(dev->devfn)) { 995 PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)]; 996 if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) { 997 /* function 0 should set multifunction bit */ 998 error_setg(errp, "PCI: single function device can't be populated " 999 "in function %x.%x", slot, PCI_FUNC(dev->devfn)); 1000 return; 1001 } 1002 return; 1003 } 1004 1005 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 1006 return; 1007 } 1008 /* function 0 indicates single function, so function > 0 must be NULL */ 1009 for (func = 1; func < PCI_FUNC_MAX; ++func) { 1010 if (bus->devices[PCI_DEVFN(slot, func)]) { 1011 error_setg(errp, "PCI: %x.0 indicates single function, " 1012 "but %x.%x is already populated.", 1013 slot, slot, func); 1014 return; 1015 } 1016 } 1017 } 1018 1019 static void pci_config_alloc(PCIDevice *pci_dev) 1020 { 1021 int config_size = pci_config_size(pci_dev); 1022 1023 pci_dev->config = g_malloc0(config_size); 1024 pci_dev->cmask = g_malloc0(config_size); 1025 pci_dev->wmask = g_malloc0(config_size); 1026 pci_dev->w1cmask = g_malloc0(config_size); 1027 pci_dev->used = g_malloc0(config_size); 1028 } 1029 1030 static void pci_config_free(PCIDevice *pci_dev) 1031 { 1032 g_free(pci_dev->config); 1033 g_free(pci_dev->cmask); 1034 g_free(pci_dev->wmask); 1035 g_free(pci_dev->w1cmask); 1036 g_free(pci_dev->used); 1037 } 1038 1039 static void do_pci_unregister_device(PCIDevice *pci_dev) 1040 { 1041 pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL; 1042 pci_config_free(pci_dev); 1043 1044 if (xen_mode == XEN_EMULATE) { 1045 xen_evtchn_remove_pci_device(pci_dev); 1046 } 1047 if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) { 1048 memory_region_del_subregion(&pci_dev->bus_master_container_region, 1049 &pci_dev->bus_master_enable_region); 1050 } 1051 address_space_destroy(&pci_dev->bus_master_as); 1052 } 1053 1054 /* Extract PCIReqIDCache into BDF format */ 1055 static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache) 1056 { 1057 uint8_t bus_n; 1058 uint16_t result; 1059 1060 switch (cache->type) { 1061 case PCI_REQ_ID_BDF: 1062 result = pci_get_bdf(cache->dev); 1063 break; 1064 case PCI_REQ_ID_SECONDARY_BUS: 1065 bus_n = pci_dev_bus_num(cache->dev); 1066 result = PCI_BUILD_BDF(bus_n, 0); 1067 break; 1068 default: 1069 error_report("Invalid PCI requester ID cache type: %d", 1070 cache->type); 1071 exit(1); 1072 break; 1073 } 1074 1075 return result; 1076 } 1077 1078 /* Parse bridges up to the root complex and return requester ID 1079 * cache for specific device. For full PCIe topology, the cache 1080 * result would be exactly the same as getting BDF of the device. 1081 * However, several tricks are required when system mixed up with 1082 * legacy PCI devices and PCIe-to-PCI bridges. 1083 * 1084 * Here we cache the proxy device (and type) not requester ID since 1085 * bus number might change from time to time. 1086 */ 1087 static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev) 1088 { 1089 PCIDevice *parent; 1090 PCIReqIDCache cache = { 1091 .dev = dev, 1092 .type = PCI_REQ_ID_BDF, 1093 }; 1094 1095 while (!pci_bus_is_root(pci_get_bus(dev))) { 1096 /* We are under PCI/PCIe bridges */ 1097 parent = pci_get_bus(dev)->parent_dev; 1098 if (pci_is_express(parent)) { 1099 if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) { 1100 /* When we pass through PCIe-to-PCI/PCIX bridges, we 1101 * override the requester ID using secondary bus 1102 * number of parent bridge with zeroed devfn 1103 * (pcie-to-pci bridge spec chap 2.3). */ 1104 cache.type = PCI_REQ_ID_SECONDARY_BUS; 1105 cache.dev = dev; 1106 } 1107 } else { 1108 /* Legacy PCI, override requester ID with the bridge's 1109 * BDF upstream. When the root complex connects to 1110 * legacy PCI devices (including buses), it can only 1111 * obtain requester ID info from directly attached 1112 * devices. If devices are attached under bridges, only 1113 * the requester ID of the bridge that is directly 1114 * attached to the root complex can be recognized. */ 1115 cache.type = PCI_REQ_ID_BDF; 1116 cache.dev = parent; 1117 } 1118 dev = parent; 1119 } 1120 1121 return cache; 1122 } 1123 1124 uint16_t pci_requester_id(PCIDevice *dev) 1125 { 1126 return pci_req_id_cache_extract(&dev->requester_id_cache); 1127 } 1128 1129 static bool pci_bus_devfn_available(PCIBus *bus, int devfn) 1130 { 1131 return !(bus->devices[devfn]); 1132 } 1133 1134 static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn) 1135 { 1136 return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn)); 1137 } 1138 1139 uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus) 1140 { 1141 return bus->slot_reserved_mask; 1142 } 1143 1144 void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask) 1145 { 1146 bus->slot_reserved_mask |= mask; 1147 } 1148 1149 void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask) 1150 { 1151 bus->slot_reserved_mask &= ~mask; 1152 } 1153 1154 /* -1 for devfn means auto assign */ 1155 static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, 1156 const char *name, int devfn, 1157 Error **errp) 1158 { 1159 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 1160 PCIConfigReadFunc *config_read = pc->config_read; 1161 PCIConfigWriteFunc *config_write = pc->config_write; 1162 Error *local_err = NULL; 1163 DeviceState *dev = DEVICE(pci_dev); 1164 PCIBus *bus = pci_get_bus(pci_dev); 1165 bool is_bridge = IS_PCI_BRIDGE(pci_dev); 1166 1167 /* Only pci bridges can be attached to extra PCI root buses */ 1168 if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) { 1169 error_setg(errp, 1170 "PCI: Only PCI/PCIe bridges can be plugged into %s", 1171 bus->parent_dev->name); 1172 return NULL; 1173 } 1174 1175 if (devfn < 0) { 1176 for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices); 1177 devfn += PCI_FUNC_MAX) { 1178 if (pci_bus_devfn_available(bus, devfn) && 1179 !pci_bus_devfn_reserved(bus, devfn)) { 1180 goto found; 1181 } 1182 } 1183 error_setg(errp, "PCI: no slot/function available for %s, all in use " 1184 "or reserved", name); 1185 return NULL; 1186 found: ; 1187 } else if (pci_bus_devfn_reserved(bus, devfn)) { 1188 error_setg(errp, "PCI: slot %d function %d not available for %s," 1189 " reserved", 1190 PCI_SLOT(devfn), PCI_FUNC(devfn), name); 1191 return NULL; 1192 } else if (!pci_bus_devfn_available(bus, devfn)) { 1193 error_setg(errp, "PCI: slot %d function %d not available for %s," 1194 " in use by %s,id=%s", 1195 PCI_SLOT(devfn), PCI_FUNC(devfn), name, 1196 bus->devices[devfn]->name, bus->devices[devfn]->qdev.id); 1197 return NULL; 1198 } 1199 1200 /* 1201 * Populating function 0 triggers a scan from the guest that 1202 * exposes other non-zero functions. Hence we need to ensure that 1203 * function 0 wasn't added yet. 1204 */ 1205 if (dev->hotplugged && !pci_is_vf(pci_dev) && 1206 pci_get_function_0(pci_dev)) { 1207 error_setg(errp, "PCI: slot %d function 0 already occupied by %s," 1208 " new func %s cannot be exposed to guest.", 1209 PCI_SLOT(pci_get_function_0(pci_dev)->devfn), 1210 pci_get_function_0(pci_dev)->name, 1211 name); 1212 1213 return NULL; 1214 } 1215 1216 pci_dev->devfn = devfn; 1217 pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev); 1218 pstrcpy(pci_dev->name, sizeof(pci_dev->name), name); 1219 1220 memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev), 1221 "bus master container", UINT64_MAX); 1222 address_space_init(&pci_dev->bus_master_as, 1223 &pci_dev->bus_master_container_region, pci_dev->name); 1224 pci_dev->bus_master_as.max_bounce_buffer_size = 1225 pci_dev->max_bounce_buffer_size; 1226 1227 if (phase_check(PHASE_MACHINE_READY)) { 1228 pci_init_bus_master(pci_dev); 1229 } 1230 pci_dev->irq_state = 0; 1231 pci_config_alloc(pci_dev); 1232 1233 pci_config_set_vendor_id(pci_dev->config, pc->vendor_id); 1234 pci_config_set_device_id(pci_dev->config, pc->device_id); 1235 pci_config_set_revision(pci_dev->config, pc->revision); 1236 pci_config_set_class(pci_dev->config, pc->class_id); 1237 1238 if (!is_bridge) { 1239 if (pc->subsystem_vendor_id || pc->subsystem_id) { 1240 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, 1241 pc->subsystem_vendor_id); 1242 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 1243 pc->subsystem_id); 1244 } else { 1245 pci_set_default_subsystem_id(pci_dev); 1246 } 1247 } else { 1248 /* subsystem_vendor_id/subsystem_id are only for header type 0 */ 1249 assert(!pc->subsystem_vendor_id); 1250 assert(!pc->subsystem_id); 1251 } 1252 pci_init_cmask(pci_dev); 1253 pci_init_wmask(pci_dev); 1254 pci_init_w1cmask(pci_dev); 1255 if (is_bridge) { 1256 pci_init_mask_bridge(pci_dev); 1257 } 1258 pci_init_multifunction(bus, pci_dev, &local_err); 1259 if (local_err) { 1260 error_propagate(errp, local_err); 1261 do_pci_unregister_device(pci_dev); 1262 return NULL; 1263 } 1264 1265 if (!config_read) 1266 config_read = pci_default_read_config; 1267 if (!config_write) 1268 config_write = pci_default_write_config; 1269 pci_dev->config_read = config_read; 1270 pci_dev->config_write = config_write; 1271 bus->devices[devfn] = pci_dev; 1272 pci_dev->version_id = 2; /* Current pci device vmstate version */ 1273 return pci_dev; 1274 } 1275 1276 static void pci_unregister_io_regions(PCIDevice *pci_dev) 1277 { 1278 PCIIORegion *r; 1279 int i; 1280 1281 for(i = 0; i < PCI_NUM_REGIONS; i++) { 1282 r = &pci_dev->io_regions[i]; 1283 if (!r->size || r->addr == PCI_BAR_UNMAPPED) 1284 continue; 1285 memory_region_del_subregion(r->address_space, r->memory); 1286 } 1287 1288 pci_unregister_vga(pci_dev); 1289 } 1290 1291 static void pci_qdev_unrealize(DeviceState *dev) 1292 { 1293 PCIDevice *pci_dev = PCI_DEVICE(dev); 1294 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 1295 1296 pci_unregister_io_regions(pci_dev); 1297 pci_del_option_rom(pci_dev); 1298 1299 if (pc->exit) { 1300 pc->exit(pci_dev); 1301 } 1302 1303 pci_device_deassert_intx(pci_dev); 1304 do_pci_unregister_device(pci_dev); 1305 1306 pci_dev->msi_trigger = NULL; 1307 1308 /* 1309 * clean up acpi-index so it could reused by another device 1310 */ 1311 if (pci_dev->acpi_index) { 1312 GSequence *used_indexes = pci_acpi_index_list(); 1313 1314 g_sequence_remove(g_sequence_lookup(used_indexes, 1315 GINT_TO_POINTER(pci_dev->acpi_index), 1316 g_cmp_uint32, NULL)); 1317 } 1318 } 1319 1320 void pci_register_bar(PCIDevice *pci_dev, int region_num, 1321 uint8_t type, MemoryRegion *memory) 1322 { 1323 PCIIORegion *r; 1324 uint32_t addr; /* offset in pci config space */ 1325 uint64_t wmask; 1326 pcibus_t size = memory_region_size(memory); 1327 uint8_t hdr_type; 1328 1329 assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */ 1330 assert(region_num >= 0); 1331 assert(region_num < PCI_NUM_REGIONS); 1332 assert(is_power_of_2(size)); 1333 1334 /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */ 1335 hdr_type = 1336 pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; 1337 assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2); 1338 1339 r = &pci_dev->io_regions[region_num]; 1340 r->addr = PCI_BAR_UNMAPPED; 1341 r->size = size; 1342 r->type = type; 1343 r->memory = memory; 1344 r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO 1345 ? pci_get_bus(pci_dev)->address_space_io 1346 : pci_get_bus(pci_dev)->address_space_mem; 1347 1348 wmask = ~(size - 1); 1349 if (region_num == PCI_ROM_SLOT) { 1350 /* ROM enable bit is writable */ 1351 wmask |= PCI_ROM_ADDRESS_ENABLE; 1352 } 1353 1354 addr = pci_bar(pci_dev, region_num); 1355 pci_set_long(pci_dev->config + addr, type); 1356 1357 if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) && 1358 r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 1359 pci_set_quad(pci_dev->wmask + addr, wmask); 1360 pci_set_quad(pci_dev->cmask + addr, ~0ULL); 1361 } else { 1362 pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff); 1363 pci_set_long(pci_dev->cmask + addr, 0xffffffff); 1364 } 1365 } 1366 1367 static void pci_update_vga(PCIDevice *pci_dev) 1368 { 1369 uint16_t cmd; 1370 1371 if (!pci_dev->has_vga) { 1372 return; 1373 } 1374 1375 cmd = pci_get_word(pci_dev->config + PCI_COMMAND); 1376 1377 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM], 1378 cmd & PCI_COMMAND_MEMORY); 1379 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO], 1380 cmd & PCI_COMMAND_IO); 1381 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI], 1382 cmd & PCI_COMMAND_IO); 1383 } 1384 1385 void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem, 1386 MemoryRegion *io_lo, MemoryRegion *io_hi) 1387 { 1388 PCIBus *bus = pci_get_bus(pci_dev); 1389 1390 assert(!pci_dev->has_vga); 1391 1392 assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE); 1393 pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem; 1394 memory_region_add_subregion_overlap(bus->address_space_mem, 1395 QEMU_PCI_VGA_MEM_BASE, mem, 1); 1396 1397 assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE); 1398 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo; 1399 memory_region_add_subregion_overlap(bus->address_space_io, 1400 QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1); 1401 1402 assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE); 1403 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi; 1404 memory_region_add_subregion_overlap(bus->address_space_io, 1405 QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1); 1406 pci_dev->has_vga = true; 1407 1408 pci_update_vga(pci_dev); 1409 } 1410 1411 void pci_unregister_vga(PCIDevice *pci_dev) 1412 { 1413 PCIBus *bus = pci_get_bus(pci_dev); 1414 1415 if (!pci_dev->has_vga) { 1416 return; 1417 } 1418 1419 memory_region_del_subregion(bus->address_space_mem, 1420 pci_dev->vga_regions[QEMU_PCI_VGA_MEM]); 1421 memory_region_del_subregion(bus->address_space_io, 1422 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]); 1423 memory_region_del_subregion(bus->address_space_io, 1424 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]); 1425 pci_dev->has_vga = false; 1426 } 1427 1428 pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num) 1429 { 1430 return pci_dev->io_regions[region_num].addr; 1431 } 1432 1433 static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg, 1434 uint8_t type, pcibus_t size) 1435 { 1436 pcibus_t new_addr; 1437 if (!pci_is_vf(d)) { 1438 int bar = pci_bar(d, reg); 1439 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 1440 new_addr = pci_get_quad(d->config + bar); 1441 } else { 1442 new_addr = pci_get_long(d->config + bar); 1443 } 1444 } else { 1445 PCIDevice *pf = d->exp.sriov_vf.pf; 1446 uint16_t sriov_cap = pf->exp.sriov_cap; 1447 int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4; 1448 uint16_t vf_offset = 1449 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET); 1450 uint16_t vf_stride = 1451 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE); 1452 uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride; 1453 1454 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 1455 new_addr = pci_get_quad(pf->config + bar); 1456 } else { 1457 new_addr = pci_get_long(pf->config + bar); 1458 } 1459 new_addr += vf_num * size; 1460 } 1461 /* The ROM slot has a specific enable bit, keep it intact */ 1462 if (reg != PCI_ROM_SLOT) { 1463 new_addr &= ~(size - 1); 1464 } 1465 return new_addr; 1466 } 1467 1468 pcibus_t pci_bar_address(PCIDevice *d, 1469 int reg, uint8_t type, pcibus_t size) 1470 { 1471 pcibus_t new_addr, last_addr; 1472 uint16_t cmd = pci_get_word(d->config + PCI_COMMAND); 1473 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); 1474 bool allow_0_address = mc->pci_allow_0_address; 1475 1476 if (type & PCI_BASE_ADDRESS_SPACE_IO) { 1477 if (!(cmd & PCI_COMMAND_IO)) { 1478 return PCI_BAR_UNMAPPED; 1479 } 1480 new_addr = pci_config_get_bar_addr(d, reg, type, size); 1481 last_addr = new_addr + size - 1; 1482 /* Check if 32 bit BAR wraps around explicitly. 1483 * TODO: make priorities correct and remove this work around. 1484 */ 1485 if (last_addr <= new_addr || last_addr >= UINT32_MAX || 1486 (!allow_0_address && new_addr == 0)) { 1487 return PCI_BAR_UNMAPPED; 1488 } 1489 return new_addr; 1490 } 1491 1492 if (!(cmd & PCI_COMMAND_MEMORY)) { 1493 return PCI_BAR_UNMAPPED; 1494 } 1495 new_addr = pci_config_get_bar_addr(d, reg, type, size); 1496 /* the ROM slot has a specific enable bit */ 1497 if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) { 1498 return PCI_BAR_UNMAPPED; 1499 } 1500 new_addr &= ~(size - 1); 1501 last_addr = new_addr + size - 1; 1502 /* NOTE: we do not support wrapping */ 1503 /* XXX: as we cannot support really dynamic 1504 mappings, we handle specific values as invalid 1505 mappings. */ 1506 if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED || 1507 (!allow_0_address && new_addr == 0)) { 1508 return PCI_BAR_UNMAPPED; 1509 } 1510 1511 /* Now pcibus_t is 64bit. 1512 * Check if 32 bit BAR wraps around explicitly. 1513 * Without this, PC ide doesn't work well. 1514 * TODO: remove this work around. 1515 */ 1516 if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) { 1517 return PCI_BAR_UNMAPPED; 1518 } 1519 1520 /* 1521 * OS is allowed to set BAR beyond its addressable 1522 * bits. For example, 32 bit OS can set 64bit bar 1523 * to >4G. Check it. TODO: we might need to support 1524 * it in the future for e.g. PAE. 1525 */ 1526 if (last_addr >= HWADDR_MAX) { 1527 return PCI_BAR_UNMAPPED; 1528 } 1529 1530 return new_addr; 1531 } 1532 1533 static void pci_update_mappings(PCIDevice *d) 1534 { 1535 PCIIORegion *r; 1536 int i; 1537 pcibus_t new_addr; 1538 1539 for(i = 0; i < PCI_NUM_REGIONS; i++) { 1540 r = &d->io_regions[i]; 1541 1542 /* this region isn't registered */ 1543 if (!r->size) 1544 continue; 1545 1546 new_addr = pci_bar_address(d, i, r->type, r->size); 1547 if (!d->has_power) { 1548 new_addr = PCI_BAR_UNMAPPED; 1549 } 1550 1551 /* This bar isn't changed */ 1552 if (new_addr == r->addr) 1553 continue; 1554 1555 /* now do the real mapping */ 1556 if (r->addr != PCI_BAR_UNMAPPED) { 1557 trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d), 1558 PCI_SLOT(d->devfn), 1559 PCI_FUNC(d->devfn), 1560 i, r->addr, r->size); 1561 memory_region_del_subregion(r->address_space, r->memory); 1562 } 1563 r->addr = new_addr; 1564 if (r->addr != PCI_BAR_UNMAPPED) { 1565 trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d), 1566 PCI_SLOT(d->devfn), 1567 PCI_FUNC(d->devfn), 1568 i, r->addr, r->size); 1569 memory_region_add_subregion_overlap(r->address_space, 1570 r->addr, r->memory, 1); 1571 } 1572 } 1573 1574 pci_update_vga(d); 1575 } 1576 1577 static inline int pci_irq_disabled(PCIDevice *d) 1578 { 1579 return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE; 1580 } 1581 1582 /* Called after interrupt disabled field update in config space, 1583 * assert/deassert interrupts if necessary. 1584 * Gets original interrupt disable bit value (before update). */ 1585 static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled) 1586 { 1587 int i, disabled = pci_irq_disabled(d); 1588 if (disabled == was_irq_disabled) 1589 return; 1590 for (i = 0; i < PCI_NUM_PINS; ++i) { 1591 int state = pci_irq_state(d, i); 1592 pci_change_irq_level(d, i, disabled ? -state : state); 1593 } 1594 } 1595 1596 uint32_t pci_default_read_config(PCIDevice *d, 1597 uint32_t address, int len) 1598 { 1599 uint32_t val = 0; 1600 1601 assert(address + len <= pci_config_size(d)); 1602 1603 if (pci_is_express_downstream_port(d) && 1604 ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) { 1605 pcie_sync_bridge_lnk(d); 1606 } 1607 memcpy(&val, d->config + address, len); 1608 return le32_to_cpu(val); 1609 } 1610 1611 void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l) 1612 { 1613 int i, was_irq_disabled = pci_irq_disabled(d); 1614 uint32_t val = val_in; 1615 1616 assert(addr + l <= pci_config_size(d)); 1617 1618 for (i = 0; i < l; val >>= 8, ++i) { 1619 uint8_t wmask = d->wmask[addr + i]; 1620 uint8_t w1cmask = d->w1cmask[addr + i]; 1621 assert(!(wmask & w1cmask)); 1622 d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask); 1623 d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ 1624 } 1625 if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) || 1626 ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) || 1627 ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) || 1628 range_covers_byte(addr, l, PCI_COMMAND)) 1629 pci_update_mappings(d); 1630 1631 if (ranges_overlap(addr, l, PCI_COMMAND, 2)) { 1632 pci_update_irq_disabled(d, was_irq_disabled); 1633 memory_region_set_enabled(&d->bus_master_enable_region, 1634 (pci_get_word(d->config + PCI_COMMAND) 1635 & PCI_COMMAND_MASTER) && d->has_power); 1636 } 1637 1638 msi_write_config(d, addr, val_in, l); 1639 msix_write_config(d, addr, val_in, l); 1640 pcie_sriov_config_write(d, addr, val_in, l); 1641 } 1642 1643 /***********************************************************/ 1644 /* generic PCI irq support */ 1645 1646 /* 0 <= irq_num <= 3. level must be 0 or 1 */ 1647 static void pci_irq_handler(void *opaque, int irq_num, int level) 1648 { 1649 PCIDevice *pci_dev = opaque; 1650 int change; 1651 1652 assert(0 <= irq_num && irq_num < PCI_NUM_PINS); 1653 assert(level == 0 || level == 1); 1654 change = level - pci_irq_state(pci_dev, irq_num); 1655 if (!change) 1656 return; 1657 1658 pci_set_irq_state(pci_dev, irq_num, level); 1659 pci_update_irq_status(pci_dev); 1660 if (pci_irq_disabled(pci_dev)) 1661 return; 1662 pci_change_irq_level(pci_dev, irq_num, change); 1663 } 1664 1665 qemu_irq pci_allocate_irq(PCIDevice *pci_dev) 1666 { 1667 int intx = pci_intx(pci_dev); 1668 assert(0 <= intx && intx < PCI_NUM_PINS); 1669 1670 return qemu_allocate_irq(pci_irq_handler, pci_dev, intx); 1671 } 1672 1673 void pci_set_irq(PCIDevice *pci_dev, int level) 1674 { 1675 int intx = pci_intx(pci_dev); 1676 pci_irq_handler(pci_dev, intx, level); 1677 } 1678 1679 /* Special hooks used by device assignment */ 1680 void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq) 1681 { 1682 assert(pci_bus_is_root(bus)); 1683 bus->route_intx_to_irq = route_intx_to_irq; 1684 } 1685 1686 PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin) 1687 { 1688 PCIBus *bus; 1689 1690 do { 1691 int dev_irq = pin; 1692 bus = pci_get_bus(dev); 1693 pin = bus->map_irq(dev, pin); 1694 trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin, 1695 pci_bus_is_root(bus) ? "root-complex" 1696 : DEVICE(bus->parent_dev)->canonical_path); 1697 dev = bus->parent_dev; 1698 } while (dev); 1699 1700 if (!bus->route_intx_to_irq) { 1701 error_report("PCI: Bug - unimplemented PCI INTx routing (%s)", 1702 object_get_typename(OBJECT(bus->qbus.parent))); 1703 return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 }; 1704 } 1705 1706 return bus->route_intx_to_irq(bus->irq_opaque, pin); 1707 } 1708 1709 bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new) 1710 { 1711 return old->mode != new->mode || old->irq != new->irq; 1712 } 1713 1714 void pci_bus_fire_intx_routing_notifier(PCIBus *bus) 1715 { 1716 PCIDevice *dev; 1717 PCIBus *sec; 1718 int i; 1719 1720 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 1721 dev = bus->devices[i]; 1722 if (dev && dev->intx_routing_notifier) { 1723 dev->intx_routing_notifier(dev); 1724 } 1725 } 1726 1727 QLIST_FOREACH(sec, &bus->child, sibling) { 1728 pci_bus_fire_intx_routing_notifier(sec); 1729 } 1730 } 1731 1732 void pci_device_set_intx_routing_notifier(PCIDevice *dev, 1733 PCIINTxRoutingNotifier notifier) 1734 { 1735 dev->intx_routing_notifier = notifier; 1736 } 1737 1738 /* 1739 * PCI-to-PCI bridge specification 1740 * 9.1: Interrupt routing. Table 9-1 1741 * 1742 * the PCI Express Base Specification, Revision 2.1 1743 * 2.2.8.1: INTx interrupt signaling - Rules 1744 * the Implementation Note 1745 * Table 2-20 1746 */ 1747 /* 1748 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD 1749 * 0-origin unlike PCI interrupt pin register. 1750 */ 1751 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) 1752 { 1753 return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin); 1754 } 1755 1756 /***********************************************************/ 1757 /* monitor info on PCI */ 1758 1759 static const pci_class_desc pci_class_descriptions[] = 1760 { 1761 { 0x0001, "VGA controller", "display"}, 1762 { 0x0100, "SCSI controller", "scsi"}, 1763 { 0x0101, "IDE controller", "ide"}, 1764 { 0x0102, "Floppy controller", "fdc"}, 1765 { 0x0103, "IPI controller", "ipi"}, 1766 { 0x0104, "RAID controller", "raid"}, 1767 { 0x0106, "SATA controller"}, 1768 { 0x0107, "SAS controller"}, 1769 { 0x0180, "Storage controller"}, 1770 { 0x0200, "Ethernet controller", "ethernet"}, 1771 { 0x0201, "Token Ring controller", "token-ring"}, 1772 { 0x0202, "FDDI controller", "fddi"}, 1773 { 0x0203, "ATM controller", "atm"}, 1774 { 0x0280, "Network controller"}, 1775 { 0x0300, "VGA controller", "display", 0x00ff}, 1776 { 0x0301, "XGA controller"}, 1777 { 0x0302, "3D controller"}, 1778 { 0x0380, "Display controller"}, 1779 { 0x0400, "Video controller", "video"}, 1780 { 0x0401, "Audio controller", "sound"}, 1781 { 0x0402, "Phone"}, 1782 { 0x0403, "Audio controller", "sound"}, 1783 { 0x0480, "Multimedia controller"}, 1784 { 0x0500, "RAM controller", "memory"}, 1785 { 0x0501, "Flash controller", "flash"}, 1786 { 0x0580, "Memory controller"}, 1787 { 0x0600, "Host bridge", "host"}, 1788 { 0x0601, "ISA bridge", "isa"}, 1789 { 0x0602, "EISA bridge", "eisa"}, 1790 { 0x0603, "MC bridge", "mca"}, 1791 { 0x0604, "PCI bridge", "pci-bridge"}, 1792 { 0x0605, "PCMCIA bridge", "pcmcia"}, 1793 { 0x0606, "NUBUS bridge", "nubus"}, 1794 { 0x0607, "CARDBUS bridge", "cardbus"}, 1795 { 0x0608, "RACEWAY bridge"}, 1796 { 0x0680, "Bridge"}, 1797 { 0x0700, "Serial port", "serial"}, 1798 { 0x0701, "Parallel port", "parallel"}, 1799 { 0x0800, "Interrupt controller", "interrupt-controller"}, 1800 { 0x0801, "DMA controller", "dma-controller"}, 1801 { 0x0802, "Timer", "timer"}, 1802 { 0x0803, "RTC", "rtc"}, 1803 { 0x0900, "Keyboard", "keyboard"}, 1804 { 0x0901, "Pen", "pen"}, 1805 { 0x0902, "Mouse", "mouse"}, 1806 { 0x0A00, "Dock station", "dock", 0x00ff}, 1807 { 0x0B00, "i386 cpu", "cpu", 0x00ff}, 1808 { 0x0c00, "Firewire controller", "firewire"}, 1809 { 0x0c01, "Access bus controller", "access-bus"}, 1810 { 0x0c02, "SSA controller", "ssa"}, 1811 { 0x0c03, "USB controller", "usb"}, 1812 { 0x0c04, "Fibre channel controller", "fibre-channel"}, 1813 { 0x0c05, "SMBus"}, 1814 { 0, NULL} 1815 }; 1816 1817 void pci_for_each_device_under_bus_reverse(PCIBus *bus, 1818 pci_bus_dev_fn fn, 1819 void *opaque) 1820 { 1821 PCIDevice *d; 1822 int devfn; 1823 1824 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 1825 d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn]; 1826 if (d) { 1827 fn(bus, d, opaque); 1828 } 1829 } 1830 } 1831 1832 void pci_for_each_device_reverse(PCIBus *bus, int bus_num, 1833 pci_bus_dev_fn fn, void *opaque) 1834 { 1835 bus = pci_find_bus_nr(bus, bus_num); 1836 1837 if (bus) { 1838 pci_for_each_device_under_bus_reverse(bus, fn, opaque); 1839 } 1840 } 1841 1842 void pci_for_each_device_under_bus(PCIBus *bus, 1843 pci_bus_dev_fn fn, void *opaque) 1844 { 1845 PCIDevice *d; 1846 int devfn; 1847 1848 for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 1849 d = bus->devices[devfn]; 1850 if (d) { 1851 fn(bus, d, opaque); 1852 } 1853 } 1854 } 1855 1856 void pci_for_each_device(PCIBus *bus, int bus_num, 1857 pci_bus_dev_fn fn, void *opaque) 1858 { 1859 bus = pci_find_bus_nr(bus, bus_num); 1860 1861 if (bus) { 1862 pci_for_each_device_under_bus(bus, fn, opaque); 1863 } 1864 } 1865 1866 const pci_class_desc *get_class_desc(int class) 1867 { 1868 const pci_class_desc *desc; 1869 1870 desc = pci_class_descriptions; 1871 while (desc->desc && class != desc->class) { 1872 desc++; 1873 } 1874 1875 return desc; 1876 } 1877 1878 void pci_init_nic_devices(PCIBus *bus, const char *default_model) 1879 { 1880 qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model, 1881 "virtio", "virtio-net-pci"); 1882 } 1883 1884 bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model, 1885 const char *alias, const char *devaddr) 1886 { 1887 NICInfo *nd = qemu_find_nic_info(model, true, alias); 1888 int dom, busnr, devfn; 1889 PCIDevice *pci_dev; 1890 unsigned slot; 1891 PCIBus *bus; 1892 1893 if (!nd) { 1894 return false; 1895 } 1896 1897 if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) { 1898 error_report("Invalid PCI device address %s for device %s", 1899 devaddr, model); 1900 exit(1); 1901 } 1902 1903 if (dom != 0) { 1904 error_report("No support for non-zero PCI domains"); 1905 exit(1); 1906 } 1907 1908 devfn = PCI_DEVFN(slot, 0); 1909 1910 bus = pci_find_bus_nr(rootbus, busnr); 1911 if (!bus) { 1912 error_report("Invalid PCI device address %s for device %s", 1913 devaddr, model); 1914 exit(1); 1915 } 1916 1917 pci_dev = pci_new(devfn, model); 1918 qdev_set_nic_properties(&pci_dev->qdev, nd); 1919 pci_realize_and_unref(pci_dev, bus, &error_fatal); 1920 return true; 1921 } 1922 1923 PCIDevice *pci_vga_init(PCIBus *bus) 1924 { 1925 vga_interface_created = true; 1926 switch (vga_interface_type) { 1927 case VGA_CIRRUS: 1928 return pci_create_simple(bus, -1, "cirrus-vga"); 1929 case VGA_QXL: 1930 return pci_create_simple(bus, -1, "qxl-vga"); 1931 case VGA_STD: 1932 return pci_create_simple(bus, -1, "VGA"); 1933 case VGA_VMWARE: 1934 return pci_create_simple(bus, -1, "vmware-svga"); 1935 case VGA_VIRTIO: 1936 return pci_create_simple(bus, -1, "virtio-vga"); 1937 case VGA_NONE: 1938 default: /* Other non-PCI types. Checking for unsupported types is already 1939 done in vl.c. */ 1940 return NULL; 1941 } 1942 } 1943 1944 /* Whether a given bus number is in range of the secondary 1945 * bus of the given bridge device. */ 1946 static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num) 1947 { 1948 return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) & 1949 PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ && 1950 dev->config[PCI_SECONDARY_BUS] <= bus_num && 1951 bus_num <= dev->config[PCI_SUBORDINATE_BUS]; 1952 } 1953 1954 /* Whether a given bus number is in a range of a root bus */ 1955 static bool pci_root_bus_in_range(PCIBus *bus, int bus_num) 1956 { 1957 int i; 1958 1959 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 1960 PCIDevice *dev = bus->devices[i]; 1961 1962 if (dev && IS_PCI_BRIDGE(dev)) { 1963 if (pci_secondary_bus_in_range(dev, bus_num)) { 1964 return true; 1965 } 1966 } 1967 } 1968 1969 return false; 1970 } 1971 1972 PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num) 1973 { 1974 PCIBus *sec; 1975 1976 if (!bus) { 1977 return NULL; 1978 } 1979 1980 if (pci_bus_num(bus) == bus_num) { 1981 return bus; 1982 } 1983 1984 /* Consider all bus numbers in range for the host pci bridge. */ 1985 if (!pci_bus_is_root(bus) && 1986 !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) { 1987 return NULL; 1988 } 1989 1990 /* try child bus */ 1991 for (; bus; bus = sec) { 1992 QLIST_FOREACH(sec, &bus->child, sibling) { 1993 if (pci_bus_num(sec) == bus_num) { 1994 return sec; 1995 } 1996 /* PXB buses assumed to be children of bus 0 */ 1997 if (pci_bus_is_root(sec)) { 1998 if (pci_root_bus_in_range(sec, bus_num)) { 1999 break; 2000 } 2001 } else { 2002 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) { 2003 break; 2004 } 2005 } 2006 } 2007 } 2008 2009 return NULL; 2010 } 2011 2012 void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, 2013 pci_bus_fn end, void *parent_state) 2014 { 2015 PCIBus *sec; 2016 void *state; 2017 2018 if (!bus) { 2019 return; 2020 } 2021 2022 if (begin) { 2023 state = begin(bus, parent_state); 2024 } else { 2025 state = parent_state; 2026 } 2027 2028 QLIST_FOREACH(sec, &bus->child, sibling) { 2029 pci_for_each_bus_depth_first(sec, begin, end, state); 2030 } 2031 2032 if (end) { 2033 end(bus, state); 2034 } 2035 } 2036 2037 2038 PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn) 2039 { 2040 bus = pci_find_bus_nr(bus, bus_num); 2041 2042 if (!bus) 2043 return NULL; 2044 2045 return bus->devices[devfn]; 2046 } 2047 2048 #define ONBOARD_INDEX_MAX (16 * 1024 - 1) 2049 2050 static void pci_qdev_realize(DeviceState *qdev, Error **errp) 2051 { 2052 PCIDevice *pci_dev = (PCIDevice *)qdev; 2053 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 2054 ObjectClass *klass = OBJECT_CLASS(pc); 2055 Error *local_err = NULL; 2056 bool is_default_rom; 2057 uint16_t class_id; 2058 2059 /* 2060 * capped by systemd (see: udev-builtin-net_id.c) 2061 * as it's the only known user honor it to avoid users 2062 * misconfigure QEMU and then wonder why acpi-index doesn't work 2063 */ 2064 if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) { 2065 error_setg(errp, "acpi-index should be less or equal to %u", 2066 ONBOARD_INDEX_MAX); 2067 return; 2068 } 2069 2070 /* 2071 * make sure that acpi-index is unique across all present PCI devices 2072 */ 2073 if (pci_dev->acpi_index) { 2074 GSequence *used_indexes = pci_acpi_index_list(); 2075 2076 if (g_sequence_lookup(used_indexes, 2077 GINT_TO_POINTER(pci_dev->acpi_index), 2078 g_cmp_uint32, NULL)) { 2079 error_setg(errp, "a PCI device with acpi-index = %" PRIu32 2080 " already exist", pci_dev->acpi_index); 2081 return; 2082 } 2083 g_sequence_insert_sorted(used_indexes, 2084 GINT_TO_POINTER(pci_dev->acpi_index), 2085 g_cmp_uint32, NULL); 2086 } 2087 2088 if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) { 2089 error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize); 2090 return; 2091 } 2092 2093 /* initialize cap_present for pci_is_express() and pci_config_size(), 2094 * Note that hybrid PCIs are not set automatically and need to manage 2095 * QEMU_PCI_CAP_EXPRESS manually */ 2096 if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) && 2097 !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) { 2098 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; 2099 } 2100 2101 if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) { 2102 pci_dev->cap_present |= QEMU_PCIE_CAP_CXL; 2103 } 2104 2105 pci_dev = do_pci_register_device(pci_dev, 2106 object_get_typename(OBJECT(qdev)), 2107 pci_dev->devfn, errp); 2108 if (pci_dev == NULL) 2109 return; 2110 2111 if (pc->realize) { 2112 pc->realize(pci_dev, &local_err); 2113 if (local_err) { 2114 error_propagate(errp, local_err); 2115 do_pci_unregister_device(pci_dev); 2116 return; 2117 } 2118 } 2119 2120 /* 2121 * A PCIe Downstream Port that do not have ARI Forwarding enabled must 2122 * associate only Device 0 with the device attached to the bus 2123 * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3, 2124 * sec 7.3.1). 2125 * With ARI, PCI_SLOT() can return non-zero value as the traditional 2126 * 5-bit Device Number and 3-bit Function Number fields in its associated 2127 * Routing IDs, Requester IDs and Completer IDs are interpreted as a 2128 * single 8-bit Function Number. Hence, ignore ARI capable devices. 2129 */ 2130 if (pci_is_express(pci_dev) && 2131 !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) && 2132 pcie_has_upstream_port(pci_dev) && 2133 PCI_SLOT(pci_dev->devfn)) { 2134 warn_report("PCI: slot %d is not valid for %s," 2135 " parent device only allows plugging into slot 0.", 2136 PCI_SLOT(pci_dev->devfn), pci_dev->name); 2137 } 2138 2139 if (pci_dev->failover_pair_id) { 2140 if (!pci_bus_is_express(pci_get_bus(pci_dev))) { 2141 error_setg(errp, "failover primary device must be on " 2142 "PCIExpress bus"); 2143 pci_qdev_unrealize(DEVICE(pci_dev)); 2144 return; 2145 } 2146 class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE); 2147 if (class_id != PCI_CLASS_NETWORK_ETHERNET) { 2148 error_setg(errp, "failover primary device is not an " 2149 "Ethernet device"); 2150 pci_qdev_unrealize(DEVICE(pci_dev)); 2151 return; 2152 } 2153 if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) 2154 || (PCI_FUNC(pci_dev->devfn) != 0)) { 2155 error_setg(errp, "failover: primary device must be in its own " 2156 "PCI slot"); 2157 pci_qdev_unrealize(DEVICE(pci_dev)); 2158 return; 2159 } 2160 qdev->allow_unplug_during_migration = true; 2161 } 2162 2163 /* rom loading */ 2164 is_default_rom = false; 2165 if (pci_dev->romfile == NULL && pc->romfile != NULL) { 2166 pci_dev->romfile = g_strdup(pc->romfile); 2167 is_default_rom = true; 2168 } 2169 2170 pci_add_option_rom(pci_dev, is_default_rom, &local_err); 2171 if (local_err) { 2172 error_propagate(errp, local_err); 2173 pci_qdev_unrealize(DEVICE(pci_dev)); 2174 return; 2175 } 2176 2177 pci_set_power(pci_dev, true); 2178 2179 pci_dev->msi_trigger = pci_msi_trigger; 2180 } 2181 2182 static PCIDevice *pci_new_internal(int devfn, bool multifunction, 2183 const char *name) 2184 { 2185 DeviceState *dev; 2186 2187 dev = qdev_new(name); 2188 qdev_prop_set_int32(dev, "addr", devfn); 2189 qdev_prop_set_bit(dev, "multifunction", multifunction); 2190 return PCI_DEVICE(dev); 2191 } 2192 2193 PCIDevice *pci_new_multifunction(int devfn, const char *name) 2194 { 2195 return pci_new_internal(devfn, true, name); 2196 } 2197 2198 PCIDevice *pci_new(int devfn, const char *name) 2199 { 2200 return pci_new_internal(devfn, false, name); 2201 } 2202 2203 bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp) 2204 { 2205 return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); 2206 } 2207 2208 PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn, 2209 const char *name) 2210 { 2211 PCIDevice *dev = pci_new_multifunction(devfn, name); 2212 pci_realize_and_unref(dev, bus, &error_fatal); 2213 return dev; 2214 } 2215 2216 PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name) 2217 { 2218 PCIDevice *dev = pci_new(devfn, name); 2219 pci_realize_and_unref(dev, bus, &error_fatal); 2220 return dev; 2221 } 2222 2223 static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size) 2224 { 2225 int offset = PCI_CONFIG_HEADER_SIZE; 2226 int i; 2227 for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) { 2228 if (pdev->used[i]) 2229 offset = i + 1; 2230 else if (i - offset + 1 == size) 2231 return offset; 2232 } 2233 return 0; 2234 } 2235 2236 static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id, 2237 uint8_t *prev_p) 2238 { 2239 uint8_t next, prev; 2240 2241 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST)) 2242 return 0; 2243 2244 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2245 prev = next + PCI_CAP_LIST_NEXT) 2246 if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id) 2247 break; 2248 2249 if (prev_p) 2250 *prev_p = prev; 2251 return next; 2252 } 2253 2254 static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset) 2255 { 2256 uint8_t next, prev, found = 0; 2257 2258 if (!(pdev->used[offset])) { 2259 return 0; 2260 } 2261 2262 assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST); 2263 2264 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2265 prev = next + PCI_CAP_LIST_NEXT) { 2266 if (next <= offset && next > found) { 2267 found = next; 2268 } 2269 } 2270 return found; 2271 } 2272 2273 /* Patch the PCI vendor and device ids in a PCI rom image if necessary. 2274 This is needed for an option rom which is used for more than one device. */ 2275 static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) 2276 { 2277 uint16_t vendor_id; 2278 uint16_t device_id; 2279 uint16_t rom_vendor_id; 2280 uint16_t rom_device_id; 2281 uint16_t rom_magic; 2282 uint16_t pcir_offset; 2283 uint8_t checksum; 2284 2285 /* Words in rom data are little endian (like in PCI configuration), 2286 so they can be read / written with pci_get_word / pci_set_word. */ 2287 2288 /* Only a valid rom will be patched. */ 2289 rom_magic = pci_get_word(ptr); 2290 if (rom_magic != 0xaa55) { 2291 PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic); 2292 return; 2293 } 2294 pcir_offset = pci_get_word(ptr + 0x18); 2295 if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) { 2296 PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset); 2297 return; 2298 } 2299 2300 vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); 2301 device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); 2302 rom_vendor_id = pci_get_word(ptr + pcir_offset + 4); 2303 rom_device_id = pci_get_word(ptr + pcir_offset + 6); 2304 2305 PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile, 2306 vendor_id, device_id, rom_vendor_id, rom_device_id); 2307 2308 checksum = ptr[6]; 2309 2310 if (vendor_id != rom_vendor_id) { 2311 /* Patch vendor id and checksum (at offset 6 for etherboot roms). */ 2312 checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8); 2313 checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8); 2314 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); 2315 ptr[6] = checksum; 2316 pci_set_word(ptr + pcir_offset + 4, vendor_id); 2317 } 2318 2319 if (device_id != rom_device_id) { 2320 /* Patch device id and checksum (at offset 6 for etherboot roms). */ 2321 checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8); 2322 checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8); 2323 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); 2324 ptr[6] = checksum; 2325 pci_set_word(ptr + pcir_offset + 6, device_id); 2326 } 2327 } 2328 2329 /* Add an option rom for the device */ 2330 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, 2331 Error **errp) 2332 { 2333 int64_t size = 0; 2334 g_autofree char *path = NULL; 2335 char name[32]; 2336 const VMStateDescription *vmsd; 2337 2338 /* 2339 * In case of incoming migration ROM will come with migration stream, no 2340 * reason to load the file. Neither we want to fail if local ROM file 2341 * mismatches with specified romsize. 2342 */ 2343 bool load_file = !runstate_check(RUN_STATE_INMIGRATE); 2344 2345 if (!pdev->romfile || !strlen(pdev->romfile)) { 2346 return; 2347 } 2348 2349 if (!pdev->rom_bar) { 2350 /* 2351 * Load rom via fw_cfg instead of creating a rom bar, 2352 * for 0.11 compatibility. 2353 */ 2354 int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); 2355 2356 /* 2357 * Hot-plugged devices can't use the option ROM 2358 * if the rom bar is disabled. 2359 */ 2360 if (DEVICE(pdev)->hotplugged) { 2361 error_setg(errp, "Hot-plugged device without ROM bar" 2362 " can't have an option ROM"); 2363 return; 2364 } 2365 2366 if (class == 0x0300) { 2367 rom_add_vga(pdev->romfile); 2368 } else { 2369 rom_add_option(pdev->romfile, -1); 2370 } 2371 return; 2372 } 2373 2374 if (load_file || pdev->romsize == UINT32_MAX) { 2375 path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile); 2376 if (path == NULL) { 2377 path = g_strdup(pdev->romfile); 2378 } 2379 2380 size = get_image_size(path); 2381 if (size < 0) { 2382 error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile); 2383 return; 2384 } else if (size == 0) { 2385 error_setg(errp, "romfile \"%s\" is empty", pdev->romfile); 2386 return; 2387 } else if (size > 2 * GiB) { 2388 error_setg(errp, 2389 "romfile \"%s\" too large (size cannot exceed 2 GiB)", 2390 pdev->romfile); 2391 return; 2392 } 2393 if (pdev->romsize != UINT_MAX) { 2394 if (size > pdev->romsize) { 2395 error_setg(errp, "romfile \"%s\" (%u bytes) " 2396 "is too large for ROM size %u", 2397 pdev->romfile, (uint32_t)size, pdev->romsize); 2398 return; 2399 } 2400 } else { 2401 pdev->romsize = pow2ceil(size); 2402 } 2403 } 2404 2405 vmsd = qdev_get_vmsd(DEVICE(pdev)); 2406 snprintf(name, sizeof(name), "%s.rom", 2407 vmsd ? vmsd->name : object_get_typename(OBJECT(pdev))); 2408 2409 pdev->has_rom = true; 2410 memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, 2411 &error_fatal); 2412 2413 if (load_file) { 2414 void *ptr = memory_region_get_ram_ptr(&pdev->rom); 2415 2416 if (load_image_size(path, ptr, size) < 0) { 2417 error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile); 2418 return; 2419 } 2420 2421 if (is_default_rom) { 2422 /* Only the default rom images will be patched (if needed). */ 2423 pci_patch_ids(pdev, ptr, size); 2424 } 2425 } 2426 2427 pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom); 2428 } 2429 2430 static void pci_del_option_rom(PCIDevice *pdev) 2431 { 2432 if (!pdev->has_rom) 2433 return; 2434 2435 vmstate_unregister_ram(&pdev->rom, &pdev->qdev); 2436 pdev->has_rom = false; 2437 } 2438 2439 /* 2440 * On success, pci_add_capability() returns a positive value 2441 * that the offset of the pci capability. 2442 * On failure, it sets an error and returns a negative error 2443 * code. 2444 */ 2445 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id, 2446 uint8_t offset, uint8_t size, 2447 Error **errp) 2448 { 2449 uint8_t *config; 2450 int i, overlapping_cap; 2451 2452 if (!offset) { 2453 offset = pci_find_space(pdev, size); 2454 /* out of PCI config space is programming error */ 2455 assert(offset); 2456 } else { 2457 /* Verify that capabilities don't overlap. Note: device assignment 2458 * depends on this check to verify that the device is not broken. 2459 * Should never trigger for emulated devices, but it's helpful 2460 * for debugging these. */ 2461 for (i = offset; i < offset + size; i++) { 2462 overlapping_cap = pci_find_capability_at_offset(pdev, i); 2463 if (overlapping_cap) { 2464 error_setg(errp, "%s:%02x:%02x.%x " 2465 "Attempt to add PCI capability %x at offset " 2466 "%x overlaps existing capability %x at offset %x", 2467 pci_root_bus_path(pdev), pci_dev_bus_num(pdev), 2468 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2469 cap_id, offset, overlapping_cap, i); 2470 return -EINVAL; 2471 } 2472 } 2473 } 2474 2475 config = pdev->config + offset; 2476 config[PCI_CAP_LIST_ID] = cap_id; 2477 config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST]; 2478 pdev->config[PCI_CAPABILITY_LIST] = offset; 2479 pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST; 2480 memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4)); 2481 /* Make capability read-only by default */ 2482 memset(pdev->wmask + offset, 0, size); 2483 /* Check capability by default */ 2484 memset(pdev->cmask + offset, 0xFF, size); 2485 return offset; 2486 } 2487 2488 /* Unlink capability from the pci config space. */ 2489 void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size) 2490 { 2491 uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev); 2492 if (!offset) 2493 return; 2494 pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT]; 2495 /* Make capability writable again */ 2496 memset(pdev->wmask + offset, 0xff, size); 2497 memset(pdev->w1cmask + offset, 0, size); 2498 /* Clear cmask as device-specific registers can't be checked */ 2499 memset(pdev->cmask + offset, 0, size); 2500 memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4)); 2501 2502 if (!pdev->config[PCI_CAPABILITY_LIST]) 2503 pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST; 2504 } 2505 2506 uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id) 2507 { 2508 return pci_find_capability_list(pdev, cap_id, NULL); 2509 } 2510 2511 static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len) 2512 { 2513 PCIDevice *d = (PCIDevice *)dev; 2514 const char *name = NULL; 2515 const pci_class_desc *desc = pci_class_descriptions; 2516 int class = pci_get_word(d->config + PCI_CLASS_DEVICE); 2517 2518 while (desc->desc && 2519 (class & ~desc->fw_ign_bits) != 2520 (desc->class & ~desc->fw_ign_bits)) { 2521 desc++; 2522 } 2523 2524 if (desc->desc) { 2525 name = desc->fw_name; 2526 } 2527 2528 if (name) { 2529 pstrcpy(buf, len, name); 2530 } else { 2531 snprintf(buf, len, "pci%04x,%04x", 2532 pci_get_word(d->config + PCI_VENDOR_ID), 2533 pci_get_word(d->config + PCI_DEVICE_ID)); 2534 } 2535 2536 return buf; 2537 } 2538 2539 static char *pcibus_get_fw_dev_path(DeviceState *dev) 2540 { 2541 PCIDevice *d = (PCIDevice *)dev; 2542 char name[33]; 2543 int has_func = !!PCI_FUNC(d->devfn); 2544 2545 return g_strdup_printf("%s@%x%s%.*x", 2546 pci_dev_fw_name(dev, name, sizeof(name)), 2547 PCI_SLOT(d->devfn), 2548 has_func ? "," : "", 2549 has_func, 2550 PCI_FUNC(d->devfn)); 2551 } 2552 2553 static char *pcibus_get_dev_path(DeviceState *dev) 2554 { 2555 PCIDevice *d = container_of(dev, PCIDevice, qdev); 2556 PCIDevice *t; 2557 int slot_depth; 2558 /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function. 2559 * 00 is added here to make this format compatible with 2560 * domain:Bus:Slot.Func for systems without nested PCI bridges. 2561 * Slot.Function list specifies the slot and function numbers for all 2562 * devices on the path from root to the specific device. */ 2563 const char *root_bus_path; 2564 int root_bus_len; 2565 char slot[] = ":SS.F"; 2566 int slot_len = sizeof slot - 1 /* For '\0' */; 2567 int path_len; 2568 char *path, *p; 2569 int s; 2570 2571 root_bus_path = pci_root_bus_path(d); 2572 root_bus_len = strlen(root_bus_path); 2573 2574 /* Calculate # of slots on path between device and root. */; 2575 slot_depth = 0; 2576 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2577 ++slot_depth; 2578 } 2579 2580 path_len = root_bus_len + slot_len * slot_depth; 2581 2582 /* Allocate memory, fill in the terminating null byte. */ 2583 path = g_malloc(path_len + 1 /* For '\0' */); 2584 path[path_len] = '\0'; 2585 2586 memcpy(path, root_bus_path, root_bus_len); 2587 2588 /* Fill in slot numbers. We walk up from device to root, so need to print 2589 * them in the reverse order, last to first. */ 2590 p = path + path_len; 2591 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2592 p -= slot_len; 2593 s = snprintf(slot, sizeof slot, ":%02x.%x", 2594 PCI_SLOT(t->devfn), PCI_FUNC(t->devfn)); 2595 assert(s == slot_len); 2596 memcpy(p, slot, slot_len); 2597 } 2598 2599 return path; 2600 } 2601 2602 static int pci_qdev_find_recursive(PCIBus *bus, 2603 const char *id, PCIDevice **pdev) 2604 { 2605 DeviceState *qdev = qdev_find_recursive(&bus->qbus, id); 2606 if (!qdev) { 2607 return -ENODEV; 2608 } 2609 2610 /* roughly check if given qdev is pci device */ 2611 if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) { 2612 *pdev = PCI_DEVICE(qdev); 2613 return 0; 2614 } 2615 return -EINVAL; 2616 } 2617 2618 int pci_qdev_find_device(const char *id, PCIDevice **pdev) 2619 { 2620 PCIHostState *host_bridge; 2621 int rc = -ENODEV; 2622 2623 QLIST_FOREACH(host_bridge, &pci_host_bridges, next) { 2624 int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev); 2625 if (!tmp) { 2626 rc = 0; 2627 break; 2628 } 2629 if (tmp != -ENODEV) { 2630 rc = tmp; 2631 } 2632 } 2633 2634 return rc; 2635 } 2636 2637 MemoryRegion *pci_address_space(PCIDevice *dev) 2638 { 2639 return pci_get_bus(dev)->address_space_mem; 2640 } 2641 2642 MemoryRegion *pci_address_space_io(PCIDevice *dev) 2643 { 2644 return pci_get_bus(dev)->address_space_io; 2645 } 2646 2647 static void pci_device_class_init(ObjectClass *klass, void *data) 2648 { 2649 DeviceClass *k = DEVICE_CLASS(klass); 2650 2651 k->realize = pci_qdev_realize; 2652 k->unrealize = pci_qdev_unrealize; 2653 k->bus_type = TYPE_PCI_BUS; 2654 device_class_set_props(k, pci_props); 2655 object_class_property_set_description( 2656 klass, "x-max-bounce-buffer-size", 2657 "Maximum buffer size allocated for bounce buffers used for mapped " 2658 "access to indirect DMA memory"); 2659 } 2660 2661 static void pci_device_class_base_init(ObjectClass *klass, void *data) 2662 { 2663 if (!object_class_is_abstract(klass)) { 2664 ObjectClass *conventional = 2665 object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE); 2666 ObjectClass *pcie = 2667 object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE); 2668 ObjectClass *cxl = 2669 object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE); 2670 assert(conventional || pcie || cxl); 2671 } 2672 } 2673 2674 /* 2675 * Get IOMMU root bus, aliased bus and devfn of a PCI device 2676 * 2677 * IOMMU root bus is needed by all call sites to call into iommu_ops. 2678 * For call sites which don't need aliased BDF, passing NULL to 2679 * aliased_[bus|devfn] is allowed. 2680 * 2681 * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device. 2682 * 2683 * @aliased_bus: return aliased #PCIBus of the PCI device, optional. 2684 * 2685 * @aliased_devfn: return aliased devfn of the PCI device, optional. 2686 */ 2687 static void pci_device_get_iommu_bus_devfn(PCIDevice *dev, 2688 PCIBus **piommu_bus, 2689 PCIBus **aliased_bus, 2690 int *aliased_devfn) 2691 { 2692 PCIBus *bus = pci_get_bus(dev); 2693 PCIBus *iommu_bus = bus; 2694 int devfn = dev->devfn; 2695 2696 while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) { 2697 PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev); 2698 2699 /* 2700 * The requester ID of the provided device may be aliased, as seen from 2701 * the IOMMU, due to topology limitations. The IOMMU relies on a 2702 * requester ID to provide a unique AddressSpace for devices, but 2703 * conventional PCI buses pre-date such concepts. Instead, the PCIe- 2704 * to-PCI bridge creates and accepts transactions on behalf of down- 2705 * stream devices. When doing so, all downstream devices are masked 2706 * (aliased) behind a single requester ID. The requester ID used 2707 * depends on the format of the bridge devices. Proper PCIe-to-PCI 2708 * bridges, with a PCIe capability indicating such, follow the 2709 * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification, 2710 * where the bridge uses the seconary bus as the bridge portion of the 2711 * requester ID and devfn of 00.0. For other bridges, typically those 2712 * found on the root complex such as the dmi-to-pci-bridge, we follow 2713 * the convention of typical bare-metal hardware, which uses the 2714 * requester ID of the bridge itself. There are device specific 2715 * exceptions to these rules, but these are the defaults that the 2716 * Linux kernel uses when determining DMA aliases itself and believed 2717 * to be true for the bare metal equivalents of the devices emulated 2718 * in QEMU. 2719 */ 2720 if (!pci_bus_is_express(iommu_bus)) { 2721 PCIDevice *parent = iommu_bus->parent_dev; 2722 2723 if (pci_is_express(parent) && 2724 pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) { 2725 devfn = PCI_DEVFN(0, 0); 2726 bus = iommu_bus; 2727 } else { 2728 devfn = parent->devfn; 2729 bus = parent_bus; 2730 } 2731 } 2732 2733 iommu_bus = parent_bus; 2734 } 2735 2736 assert(0 <= devfn && devfn < PCI_DEVFN_MAX); 2737 assert(iommu_bus); 2738 2739 if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) { 2740 iommu_bus = NULL; 2741 } 2742 2743 *piommu_bus = iommu_bus; 2744 2745 if (aliased_bus) { 2746 *aliased_bus = bus; 2747 } 2748 2749 if (aliased_devfn) { 2750 *aliased_devfn = devfn; 2751 } 2752 } 2753 2754 AddressSpace *pci_device_iommu_address_space(PCIDevice *dev) 2755 { 2756 PCIBus *bus; 2757 PCIBus *iommu_bus; 2758 int devfn; 2759 2760 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 2761 if (iommu_bus) { 2762 return iommu_bus->iommu_ops->get_address_space(bus, 2763 iommu_bus->iommu_opaque, devfn); 2764 } 2765 return &address_space_memory; 2766 } 2767 2768 bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod, 2769 Error **errp) 2770 { 2771 PCIBus *iommu_bus, *aliased_bus; 2772 int aliased_devfn; 2773 2774 /* set_iommu_device requires device's direct BDF instead of aliased BDF */ 2775 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, 2776 &aliased_bus, &aliased_devfn); 2777 if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) { 2778 hiod->aliased_bus = aliased_bus; 2779 hiod->aliased_devfn = aliased_devfn; 2780 return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev), 2781 iommu_bus->iommu_opaque, 2782 dev->devfn, hiod, errp); 2783 } 2784 return true; 2785 } 2786 2787 void pci_device_unset_iommu_device(PCIDevice *dev) 2788 { 2789 PCIBus *iommu_bus; 2790 2791 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL); 2792 if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) { 2793 return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev), 2794 iommu_bus->iommu_opaque, 2795 dev->devfn); 2796 } 2797 } 2798 2799 void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque) 2800 { 2801 /* 2802 * If called, pci_setup_iommu() should provide a minimum set of 2803 * useful callbacks for the bus. 2804 */ 2805 assert(ops); 2806 assert(ops->get_address_space); 2807 2808 bus->iommu_ops = ops; 2809 bus->iommu_opaque = opaque; 2810 } 2811 2812 static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque) 2813 { 2814 Range *range = opaque; 2815 uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND); 2816 int i; 2817 2818 if (!(cmd & PCI_COMMAND_MEMORY)) { 2819 return; 2820 } 2821 2822 if (IS_PCI_BRIDGE(dev)) { 2823 pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); 2824 pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); 2825 2826 base = MAX(base, 0x1ULL << 32); 2827 2828 if (limit >= base) { 2829 Range pref_range; 2830 range_set_bounds(&pref_range, base, limit); 2831 range_extend(range, &pref_range); 2832 } 2833 } 2834 for (i = 0; i < PCI_NUM_REGIONS; ++i) { 2835 PCIIORegion *r = &dev->io_regions[i]; 2836 pcibus_t lob, upb; 2837 Range region_range; 2838 2839 if (!r->size || 2840 (r->type & PCI_BASE_ADDRESS_SPACE_IO) || 2841 !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) { 2842 continue; 2843 } 2844 2845 lob = pci_bar_address(dev, i, r->type, r->size); 2846 upb = lob + r->size - 1; 2847 if (lob == PCI_BAR_UNMAPPED) { 2848 continue; 2849 } 2850 2851 lob = MAX(lob, 0x1ULL << 32); 2852 2853 if (upb >= lob) { 2854 range_set_bounds(®ion_range, lob, upb); 2855 range_extend(range, ®ion_range); 2856 } 2857 } 2858 } 2859 2860 void pci_bus_get_w64_range(PCIBus *bus, Range *range) 2861 { 2862 range_make_empty(range); 2863 pci_for_each_device_under_bus(bus, pci_dev_get_w64, range); 2864 } 2865 2866 static bool pcie_has_upstream_port(PCIDevice *dev) 2867 { 2868 PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev)); 2869 2870 /* Device associated with an upstream port. 2871 * As there are several types of these, it's easier to check the 2872 * parent device: upstream ports are always connected to 2873 * root or downstream ports. 2874 */ 2875 return parent_dev && 2876 pci_is_express(parent_dev) && 2877 parent_dev->exp.exp_cap && 2878 (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT || 2879 pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM); 2880 } 2881 2882 PCIDevice *pci_get_function_0(PCIDevice *pci_dev) 2883 { 2884 PCIBus *bus = pci_get_bus(pci_dev); 2885 2886 if(pcie_has_upstream_port(pci_dev)) { 2887 /* With an upstream PCIe port, we only support 1 device at slot 0 */ 2888 return bus->devices[0]; 2889 } else { 2890 /* Other bus types might support multiple devices at slots 0-31 */ 2891 return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)]; 2892 } 2893 } 2894 2895 MSIMessage pci_get_msi_message(PCIDevice *dev, int vector) 2896 { 2897 MSIMessage msg; 2898 if (msix_enabled(dev)) { 2899 msg = msix_get_message(dev, vector); 2900 } else if (msi_enabled(dev)) { 2901 msg = msi_get_message(dev, vector); 2902 } else { 2903 /* Should never happen */ 2904 error_report("%s: unknown interrupt type", __func__); 2905 abort(); 2906 } 2907 return msg; 2908 } 2909 2910 void pci_set_power(PCIDevice *d, bool state) 2911 { 2912 if (d->has_power == state) { 2913 return; 2914 } 2915 2916 d->has_power = state; 2917 pci_update_mappings(d); 2918 memory_region_set_enabled(&d->bus_master_enable_region, 2919 (pci_get_word(d->config + PCI_COMMAND) 2920 & PCI_COMMAND_MASTER) && d->has_power); 2921 if (!d->has_power) { 2922 pci_device_reset(d); 2923 } 2924 } 2925 2926 static const TypeInfo pci_device_type_info = { 2927 .name = TYPE_PCI_DEVICE, 2928 .parent = TYPE_DEVICE, 2929 .instance_size = sizeof(PCIDevice), 2930 .abstract = true, 2931 .class_size = sizeof(PCIDeviceClass), 2932 .class_init = pci_device_class_init, 2933 .class_base_init = pci_device_class_base_init, 2934 }; 2935 2936 static void pci_register_types(void) 2937 { 2938 type_register_static(&pci_bus_info); 2939 type_register_static(&pcie_bus_info); 2940 type_register_static(&cxl_bus_info); 2941 type_register_static(&conventional_pci_interface_info); 2942 type_register_static(&cxl_interface_info); 2943 type_register_static(&pcie_interface_info); 2944 type_register_static(&pci_device_type_info); 2945 } 2946 2947 type_init(pci_register_types) 2948