1 /* 2 * QEMU PCI bus manager 3 * 4 * Copyright (c) 2004 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "qemu/datadir.h" 27 #include "qemu/units.h" 28 #include "hw/irq.h" 29 #include "hw/pci/pci.h" 30 #include "hw/pci/pci_bridge.h" 31 #include "hw/pci/pci_bus.h" 32 #include "hw/pci/pci_host.h" 33 #include "hw/qdev-properties.h" 34 #include "hw/qdev-properties-system.h" 35 #include "migration/cpr.h" 36 #include "migration/qemu-file-types.h" 37 #include "migration/vmstate.h" 38 #include "net/net.h" 39 #include "system/numa.h" 40 #include "system/runstate.h" 41 #include "system/system.h" 42 #include "hw/loader.h" 43 #include "qemu/error-report.h" 44 #include "qemu/range.h" 45 #include "trace.h" 46 #include "hw/pci/msi.h" 47 #include "hw/pci/msix.h" 48 #include "hw/hotplug.h" 49 #include "hw/boards.h" 50 #include "hw/nvram/fw_cfg.h" 51 #include "qapi/error.h" 52 #include "qemu/cutils.h" 53 #include "pci-internal.h" 54 55 #include "hw/xen/xen.h" 56 #include "hw/i386/kvm/xen_evtchn.h" 57 58 bool pci_available = true; 59 60 static char *pcibus_get_dev_path(DeviceState *dev); 61 static char *pcibus_get_fw_dev_path(DeviceState *dev); 62 static void pcibus_reset_hold(Object *obj, ResetType type); 63 static bool pcie_has_upstream_port(PCIDevice *dev); 64 65 static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name, 66 void *opaque, Error **errp) 67 { 68 uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj)); 69 70 visit_type_uint8(v, name, &busnr, errp); 71 } 72 73 static const PropertyInfo prop_pci_busnr = { 74 .type = "busnr", 75 .get = prop_pci_busnr_get, 76 }; 77 78 static const Property pci_props[] = { 79 DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1), 80 DEFINE_PROP_STRING("romfile", PCIDevice, romfile), 81 DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX), 82 DEFINE_PROP_INT32("rombar", PCIDevice, rom_bar, -1), 83 DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present, 84 QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false), 85 DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present, 86 QEMU_PCIE_LNKSTA_DLLLA_BITNR, true), 87 DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present, 88 QEMU_PCIE_EXTCAP_INIT_BITNR, true), 89 DEFINE_PROP_STRING("failover_pair_id", PCIDevice, 90 failover_pair_id), 91 DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), 92 DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present, 93 QEMU_PCIE_ERR_UNC_MASK_BITNR, true), 94 
DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present, 95 QEMU_PCIE_ARI_NEXTFN_1_BITNR, false), 96 DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice, 97 max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE), 98 DEFINE_PROP_STRING("sriov-pf", PCIDevice, sriov_pf), 99 DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present, 100 QEMU_PCIE_EXT_TAG_BITNR, true), 101 { .name = "busnr", .info = &prop_pci_busnr }, 102 }; 103 104 static const VMStateDescription vmstate_pcibus = { 105 .name = "PCIBUS", 106 .version_id = 1, 107 .minimum_version_id = 1, 108 .fields = (const VMStateField[]) { 109 VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL), 110 VMSTATE_VARRAY_INT32(irq_count, PCIBus, 111 nirq, 0, vmstate_info_int32, 112 int32_t), 113 VMSTATE_END_OF_LIST() 114 } 115 }; 116 117 static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data) 118 { 119 return a - b; 120 } 121 122 static GSequence *pci_acpi_index_list(void) 123 { 124 static GSequence *used_acpi_index_list; 125 126 if (!used_acpi_index_list) { 127 used_acpi_index_list = g_sequence_new(NULL); 128 } 129 return used_acpi_index_list; 130 } 131 132 static void pci_set_master(PCIDevice *d, bool enable) 133 { 134 memory_region_set_enabled(&d->bus_master_enable_region, enable); 135 d->is_master = enable; /* cache the status */ 136 } 137 138 static void pci_init_bus_master(PCIDevice *pci_dev) 139 { 140 AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev); 141 142 memory_region_init_alias(&pci_dev->bus_master_enable_region, 143 OBJECT(pci_dev), "bus master", 144 dma_as->root, 0, memory_region_size(dma_as->root)); 145 pci_set_master(pci_dev, false); 146 memory_region_add_subregion(&pci_dev->bus_master_container_region, 0, 147 &pci_dev->bus_master_enable_region); 148 } 149 150 static void pcibus_machine_done(Notifier *notifier, void *data) 151 { 152 PCIBus *bus = container_of(notifier, PCIBus, machine_done); 153 int i; 154 155 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 156 if (bus->devices[i]) { 157 pci_init_bus_master(bus->devices[i]); 158 } 159 } 160 } 161 162 static void pci_bus_realize(BusState *qbus, Error **errp) 163 { 164 PCIBus *bus = PCI_BUS(qbus); 165 166 bus->machine_done.notify = pcibus_machine_done; 167 qemu_add_machine_init_done_notifier(&bus->machine_done); 168 169 vmstate_register_any(NULL, &vmstate_pcibus, bus); 170 } 171 172 static void pcie_bus_realize(BusState *qbus, Error **errp) 173 { 174 PCIBus *bus = PCI_BUS(qbus); 175 Error *local_err = NULL; 176 177 pci_bus_realize(qbus, &local_err); 178 if (local_err) { 179 error_propagate(errp, local_err); 180 return; 181 } 182 183 /* 184 * A PCI-E bus can support extended config space if it's the root 185 * bus, or if the bus/bridge above it does as well 186 */ 187 if (pci_bus_is_root(bus)) { 188 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; 189 } else { 190 PCIBus *parent_bus = pci_get_bus(bus->parent_dev); 191 192 if (pci_bus_allows_extended_config_space(parent_bus)) { 193 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; 194 } 195 } 196 } 197 198 static void pci_bus_unrealize(BusState *qbus) 199 { 200 PCIBus *bus = PCI_BUS(qbus); 201 202 qemu_remove_machine_init_done_notifier(&bus->machine_done); 203 204 vmstate_unregister(NULL, &vmstate_pcibus, bus); 205 } 206 207 static int pcibus_num(PCIBus *bus) 208 { 209 if (pci_bus_is_root(bus)) { 210 return 0; /* pci host bridge */ 211 } 212 return bus->parent_dev->config[PCI_SECONDARY_BUS]; 213 } 214 215 static uint16_t pcibus_numa_node(PCIBus *bus) 216 { 217 return NUMA_NODE_UNASSIGNED; 218 } 219 220 bool 
pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg, 221 PCIBus *bus, 222 Error **errp) 223 { 224 Object *obj; 225 226 if (!bus) { 227 return true; 228 } 229 obj = OBJECT(bus); 230 231 return fw_cfg_add_file_from_generator(fw_cfg, obj->parent, 232 object_get_canonical_path_component(obj), 233 "etc/extra-pci-roots", errp); 234 } 235 236 static GByteArray *pci_bus_fw_cfg_gen_data(Object *obj, Error **errp) 237 { 238 PCIBus *bus = PCI_BUS(obj); 239 GByteArray *byte_array; 240 uint64_t extra_hosts = 0; 241 242 if (!bus) { 243 return NULL; 244 } 245 246 QLIST_FOREACH(bus, &bus->child, sibling) { 247 /* look for expander root buses */ 248 if (pci_bus_is_root(bus)) { 249 extra_hosts++; 250 } 251 } 252 253 if (!extra_hosts) { 254 return NULL; 255 } 256 extra_hosts = cpu_to_le64(extra_hosts); 257 258 byte_array = g_byte_array_new(); 259 g_byte_array_append(byte_array, 260 (const void *)&extra_hosts, sizeof(extra_hosts)); 261 262 return byte_array; 263 } 264 265 static void pci_bus_class_init(ObjectClass *klass, const void *data) 266 { 267 BusClass *k = BUS_CLASS(klass); 268 PCIBusClass *pbc = PCI_BUS_CLASS(klass); 269 ResettableClass *rc = RESETTABLE_CLASS(klass); 270 FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(klass); 271 272 k->print_dev = pcibus_dev_print; 273 k->get_dev_path = pcibus_get_dev_path; 274 k->get_fw_dev_path = pcibus_get_fw_dev_path; 275 k->realize = pci_bus_realize; 276 k->unrealize = pci_bus_unrealize; 277 278 rc->phases.hold = pcibus_reset_hold; 279 280 pbc->bus_num = pcibus_num; 281 pbc->numa_node = pcibus_numa_node; 282 283 fwgc->get_data = pci_bus_fw_cfg_gen_data; 284 } 285 286 static const TypeInfo pci_bus_info = { 287 .name = TYPE_PCI_BUS, 288 .parent = TYPE_BUS, 289 .instance_size = sizeof(PCIBus), 290 .class_size = sizeof(PCIBusClass), 291 .class_init = pci_bus_class_init, 292 .interfaces = (const InterfaceInfo[]) { 293 { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE }, 294 { } 295 } 296 }; 297 298 static const TypeInfo cxl_interface_info = { 299 .name = INTERFACE_CXL_DEVICE, 300 .parent = TYPE_INTERFACE, 301 }; 302 303 static const TypeInfo pcie_interface_info = { 304 .name = INTERFACE_PCIE_DEVICE, 305 .parent = TYPE_INTERFACE, 306 }; 307 308 static const TypeInfo conventional_pci_interface_info = { 309 .name = INTERFACE_CONVENTIONAL_PCI_DEVICE, 310 .parent = TYPE_INTERFACE, 311 }; 312 313 static void pcie_bus_class_init(ObjectClass *klass, const void *data) 314 { 315 BusClass *k = BUS_CLASS(klass); 316 317 k->realize = pcie_bus_realize; 318 } 319 320 static const TypeInfo pcie_bus_info = { 321 .name = TYPE_PCIE_BUS, 322 .parent = TYPE_PCI_BUS, 323 .class_init = pcie_bus_class_init, 324 }; 325 326 static const TypeInfo cxl_bus_info = { 327 .name = TYPE_CXL_BUS, 328 .parent = TYPE_PCIE_BUS, 329 .class_init = pcie_bus_class_init, 330 }; 331 332 static void pci_update_mappings(PCIDevice *d); 333 static void pci_irq_handler(void *opaque, int irq_num, int level); 334 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **); 335 static void pci_del_option_rom(PCIDevice *pdev); 336 337 static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET; 338 static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU; 339 340 PCIHostStateList pci_host_bridges; 341 342 int pci_bar(PCIDevice *d, int reg) 343 { 344 uint8_t type; 345 346 /* PCIe virtual functions do not have their own BARs */ 347 assert(!pci_is_vf(d)); 348 349 if (reg != PCI_ROM_SLOT) 350 return PCI_BASE_ADDRESS_0 + reg * 4; 351 352 type = d->config[PCI_HEADER_TYPE] & 
~PCI_HEADER_TYPE_MULTI_FUNCTION; 353 return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS; 354 } 355 356 static inline int pci_irq_state(PCIDevice *d, int irq_num) 357 { 358 return (d->irq_state >> irq_num) & 0x1; 359 } 360 361 static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level) 362 { 363 d->irq_state &= ~(0x1 << irq_num); 364 d->irq_state |= level << irq_num; 365 } 366 367 static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change) 368 { 369 assert(irq_num >= 0); 370 assert(irq_num < bus->nirq); 371 bus->irq_count[irq_num] += change; 372 bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0); 373 } 374 375 static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change) 376 { 377 PCIBus *bus; 378 for (;;) { 379 int dev_irq = irq_num; 380 bus = pci_get_bus(pci_dev); 381 assert(bus->map_irq); 382 irq_num = bus->map_irq(pci_dev, irq_num); 383 trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num, 384 pci_bus_is_root(bus) ? "root-complex" 385 : DEVICE(bus->parent_dev)->canonical_path); 386 if (bus->set_irq) 387 break; 388 pci_dev = bus->parent_dev; 389 } 390 pci_bus_change_irq_level(bus, irq_num, change); 391 } 392 393 int pci_bus_get_irq_level(PCIBus *bus, int irq_num) 394 { 395 assert(irq_num >= 0); 396 assert(irq_num < bus->nirq); 397 return !!bus->irq_count[irq_num]; 398 } 399 400 /* Update interrupt status bit in config space on interrupt 401 * state change. */ 402 static void pci_update_irq_status(PCIDevice *dev) 403 { 404 if (dev->irq_state) { 405 dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT; 406 } else { 407 dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; 408 } 409 } 410 411 void pci_device_deassert_intx(PCIDevice *dev) 412 { 413 int i; 414 for (i = 0; i < PCI_NUM_PINS; ++i) { 415 pci_irq_handler(dev, i, 0); 416 } 417 } 418 419 static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg) 420 { 421 MemTxAttrs attrs = {}; 422 423 /* 424 * Xen uses the high bits of the address to contain some of the bits 425 * of the PIRQ#. Therefore we can't just send the write cycle and 426 * trust that it's caught by the APIC at 0xfee00000 because the 427 * target of the write might be e.g. 0x0x1000fee46000 for PIRQ#4166. 428 * So we intercept the delivery here instead of in kvm_send_msi(). 429 */ 430 if (xen_mode == XEN_EMULATE && 431 xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) { 432 return; 433 } 434 attrs.requester_id = pci_requester_id(dev); 435 address_space_stl_le(&dev->bus_master_as, msg.address, msg.data, 436 attrs, NULL); 437 } 438 439 /* 440 * Register and track a PM capability. If wmask is also enabled for the power 441 * state field of the pmcsr register, guest writes may change the device PM 442 * state. BAR access is only enabled while the device is in the D0 state. 443 * Return the capability offset or negative error code. 
444 */ 445 int pci_pm_init(PCIDevice *d, uint8_t offset, Error **errp) 446 { 447 int cap = pci_add_capability(d, PCI_CAP_ID_PM, offset, PCI_PM_SIZEOF, errp); 448 449 if (cap < 0) { 450 return cap; 451 } 452 453 d->pm_cap = cap; 454 d->cap_present |= QEMU_PCI_CAP_PM; 455 456 return cap; 457 } 458 459 static uint8_t pci_pm_state(PCIDevice *d) 460 { 461 uint16_t pmcsr; 462 463 if (!(d->cap_present & QEMU_PCI_CAP_PM)) { 464 return 0; 465 } 466 467 pmcsr = pci_get_word(d->config + d->pm_cap + PCI_PM_CTRL); 468 469 return pmcsr & PCI_PM_CTRL_STATE_MASK; 470 } 471 472 /* 473 * Update the PM capability state based on the new value stored in config 474 * space respective to the old, pre-write state provided. If the new value 475 * is rejected (unsupported or invalid transition) restore the old value. 476 * Return the resulting PM state. 477 */ 478 static uint8_t pci_pm_update(PCIDevice *d, uint32_t addr, int l, uint8_t old) 479 { 480 uint16_t pmc; 481 uint8_t new; 482 483 if (!(d->cap_present & QEMU_PCI_CAP_PM) || 484 !range_covers_byte(addr, l, d->pm_cap + PCI_PM_CTRL)) { 485 return old; 486 } 487 488 new = pci_pm_state(d); 489 if (new == old) { 490 return old; 491 } 492 493 pmc = pci_get_word(d->config + d->pm_cap + PCI_PM_PMC); 494 495 /* 496 * Transitions to D1 & D2 are only allowed if supported. Devices may 497 * only transition to higher D-states or to D0. 498 */ 499 if ((!(pmc & PCI_PM_CAP_D1) && new == 1) || 500 (!(pmc & PCI_PM_CAP_D2) && new == 2) || 501 (old && new && new < old)) { 502 pci_word_test_and_clear_mask(d->config + d->pm_cap + PCI_PM_CTRL, 503 PCI_PM_CTRL_STATE_MASK); 504 pci_word_test_and_set_mask(d->config + d->pm_cap + PCI_PM_CTRL, 505 old); 506 trace_pci_pm_bad_transition(d->name, pci_dev_bus_num(d), 507 PCI_SLOT(d->devfn), PCI_FUNC(d->devfn), 508 old, new); 509 return old; 510 } 511 512 trace_pci_pm_transition(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn), 513 PCI_FUNC(d->devfn), old, new); 514 return new; 515 } 516 517 static void pci_reset_regions(PCIDevice *dev) 518 { 519 int r; 520 if (pci_is_vf(dev)) { 521 return; 522 } 523 524 for (r = 0; r < PCI_NUM_REGIONS; ++r) { 525 PCIIORegion *region = &dev->io_regions[r]; 526 if (!region->size) { 527 continue; 528 } 529 530 if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) && 531 region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 532 pci_set_quad(dev->config + pci_bar(dev, r), region->type); 533 } else { 534 pci_set_long(dev->config + pci_bar(dev, r), region->type); 535 } 536 } 537 } 538 539 static void pci_do_device_reset(PCIDevice *dev) 540 { 541 if ((dev->cap_present & QEMU_PCI_SKIP_RESET_ON_CPR) && cpr_is_incoming()) { 542 return; 543 } 544 545 pci_device_deassert_intx(dev); 546 assert(dev->irq_state == 0); 547 548 /* Clear all writable bits */ 549 pci_word_test_and_clear_mask(dev->config + PCI_COMMAND, 550 pci_get_word(dev->wmask + PCI_COMMAND) | 551 pci_get_word(dev->w1cmask + PCI_COMMAND)); 552 pci_word_test_and_clear_mask(dev->config + PCI_STATUS, 553 pci_get_word(dev->wmask + PCI_STATUS) | 554 pci_get_word(dev->w1cmask + PCI_STATUS)); 555 /* Some devices make bits of PCI_INTERRUPT_LINE read only */ 556 pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE, 557 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) | 558 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE)); 559 dev->config[PCI_CACHE_LINE_SIZE] = 0x0; 560 /* Default PM state is D0 */ 561 if (dev->cap_present & QEMU_PCI_CAP_PM) { 562 pci_word_test_and_clear_mask(dev->config + dev->pm_cap + PCI_PM_CTRL, 563 PCI_PM_CTRL_STATE_MASK); 564 } 565 
pci_reset_regions(dev); 566 pci_update_mappings(dev); 567 568 msi_reset(dev); 569 msix_reset(dev); 570 pcie_sriov_pf_reset(dev); 571 } 572 573 /* 574 * This function is called on #RST and FLR. 575 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set 576 */ 577 void pci_device_reset(PCIDevice *dev) 578 { 579 device_cold_reset(&dev->qdev); 580 pci_do_device_reset(dev); 581 } 582 583 /* 584 * Trigger pci bus reset under a given bus. 585 * Called via bus_cold_reset on RST# assert, after the devices 586 * have been reset device_cold_reset-ed already. 587 */ 588 static void pcibus_reset_hold(Object *obj, ResetType type) 589 { 590 PCIBus *bus = PCI_BUS(obj); 591 int i; 592 593 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 594 if (bus->devices[i]) { 595 pci_do_device_reset(bus->devices[i]); 596 } 597 } 598 599 for (i = 0; i < bus->nirq; i++) { 600 assert(bus->irq_count[i] == 0); 601 } 602 } 603 604 static void pci_host_bus_register(DeviceState *host) 605 { 606 PCIHostState *host_bridge = PCI_HOST_BRIDGE(host); 607 608 QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next); 609 } 610 611 static void pci_host_bus_unregister(DeviceState *host) 612 { 613 PCIHostState *host_bridge = PCI_HOST_BRIDGE(host); 614 615 QLIST_REMOVE(host_bridge, next); 616 } 617 618 PCIBus *pci_device_root_bus(const PCIDevice *d) 619 { 620 PCIBus *bus = pci_get_bus(d); 621 622 while (!pci_bus_is_root(bus)) { 623 d = bus->parent_dev; 624 assert(d != NULL); 625 626 bus = pci_get_bus(d); 627 } 628 629 return bus; 630 } 631 632 const char *pci_root_bus_path(PCIDevice *dev) 633 { 634 PCIBus *rootbus = pci_device_root_bus(dev); 635 PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent); 636 PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge); 637 638 assert(host_bridge->bus == rootbus); 639 640 if (hc->root_bus_path) { 641 return (*hc->root_bus_path)(host_bridge, rootbus); 642 } 643 644 return rootbus->qbus.name; 645 } 646 647 bool pci_bus_bypass_iommu(PCIBus *bus) 648 { 649 PCIBus *rootbus = bus; 650 PCIHostState *host_bridge; 651 652 if (!pci_bus_is_root(bus)) { 653 rootbus = pci_device_root_bus(bus->parent_dev); 654 } 655 656 host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent); 657 658 assert(host_bridge->bus == rootbus); 659 660 return host_bridge->bypass_iommu; 661 } 662 663 static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent, 664 MemoryRegion *mem, MemoryRegion *io, 665 uint8_t devfn_min) 666 { 667 assert(PCI_FUNC(devfn_min) == 0); 668 bus->devfn_min = devfn_min; 669 bus->slot_reserved_mask = 0x0; 670 bus->address_space_mem = mem; 671 bus->address_space_io = io; 672 bus->flags |= PCI_BUS_IS_ROOT; 673 674 /* host bridge */ 675 QLIST_INIT(&bus->child); 676 677 pci_host_bus_register(parent); 678 } 679 680 static void pci_bus_uninit(PCIBus *bus) 681 { 682 pci_host_bus_unregister(BUS(bus)->parent); 683 } 684 685 bool pci_bus_is_express(const PCIBus *bus) 686 { 687 return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS); 688 } 689 690 void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent, 691 const char *name, 692 MemoryRegion *mem, MemoryRegion *io, 693 uint8_t devfn_min, const char *typename) 694 { 695 qbus_init(bus, bus_size, typename, parent, name); 696 pci_root_bus_internal_init(bus, parent, mem, io, devfn_min); 697 } 698 699 PCIBus *pci_root_bus_new(DeviceState *parent, const char *name, 700 MemoryRegion *mem, MemoryRegion *io, 701 uint8_t devfn_min, const char *typename) 702 { 703 PCIBus *bus; 704 705 bus = PCI_BUS(qbus_new(typename, parent, name)); 706 
pci_root_bus_internal_init(bus, parent, mem, io, devfn_min); 707 return bus; 708 } 709 710 void pci_root_bus_cleanup(PCIBus *bus) 711 { 712 pci_bus_uninit(bus); 713 /* the caller of the unplug hotplug handler will delete this device */ 714 qbus_unrealize(BUS(bus)); 715 } 716 717 void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, 718 void *irq_opaque, int nirq) 719 { 720 bus->set_irq = set_irq; 721 bus->irq_opaque = irq_opaque; 722 bus->nirq = nirq; 723 g_free(bus->irq_count); 724 bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0])); 725 } 726 727 void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq) 728 { 729 bus->map_irq = map_irq; 730 } 731 732 void pci_bus_irqs_cleanup(PCIBus *bus) 733 { 734 bus->set_irq = NULL; 735 bus->map_irq = NULL; 736 bus->irq_opaque = NULL; 737 bus->nirq = 0; 738 g_free(bus->irq_count); 739 bus->irq_count = NULL; 740 } 741 742 PCIBus *pci_register_root_bus(DeviceState *parent, const char *name, 743 pci_set_irq_fn set_irq, pci_map_irq_fn map_irq, 744 void *irq_opaque, 745 MemoryRegion *mem, MemoryRegion *io, 746 uint8_t devfn_min, int nirq, 747 const char *typename) 748 { 749 PCIBus *bus; 750 751 bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename); 752 pci_bus_irqs(bus, set_irq, irq_opaque, nirq); 753 pci_bus_map_irqs(bus, map_irq); 754 return bus; 755 } 756 757 void pci_unregister_root_bus(PCIBus *bus) 758 { 759 pci_bus_irqs_cleanup(bus); 760 pci_root_bus_cleanup(bus); 761 } 762 763 int pci_bus_num(PCIBus *s) 764 { 765 return PCI_BUS_GET_CLASS(s)->bus_num(s); 766 } 767 768 /* Returns the min and max bus numbers of a PCI bus hierarchy */ 769 void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus) 770 { 771 int i; 772 *min_bus = *max_bus = pci_bus_num(bus); 773 774 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 775 PCIDevice *dev = bus->devices[i]; 776 777 if (dev && IS_PCI_BRIDGE(dev)) { 778 *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]); 779 *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]); 780 } 781 } 782 } 783 784 int pci_bus_numa_node(PCIBus *bus) 785 { 786 return PCI_BUS_GET_CLASS(bus)->numa_node(bus); 787 } 788 789 static int get_pci_config_device(QEMUFile *f, void *pv, size_t size, 790 const VMStateField *field) 791 { 792 PCIDevice *s = container_of(pv, PCIDevice, config); 793 uint8_t *config; 794 int i; 795 796 assert(size == pci_config_size(s)); 797 config = g_malloc(size); 798 799 qemu_get_buffer(f, config, size); 800 for (i = 0; i < size; ++i) { 801 if ((config[i] ^ s->config[i]) & 802 s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) { 803 error_report("%s: Bad config data: i=0x%x read: %x device: %x " 804 "cmask: %x wmask: %x w1cmask:%x", __func__, 805 i, config[i], s->config[i], 806 s->cmask[i], s->wmask[i], s->w1cmask[i]); 807 g_free(config); 808 return -EINVAL; 809 } 810 } 811 memcpy(s->config, config, size); 812 813 pci_update_mappings(s); 814 if (IS_PCI_BRIDGE(s)) { 815 pci_bridge_update_mappings(PCI_BRIDGE(s)); 816 } 817 818 pci_set_master(s, pci_get_word(s->config + PCI_COMMAND) 819 & PCI_COMMAND_MASTER); 820 821 g_free(config); 822 return 0; 823 } 824 825 /* just put buffer */ 826 static int put_pci_config_device(QEMUFile *f, void *pv, size_t size, 827 const VMStateField *field, JSONWriter *vmdesc) 828 { 829 const uint8_t **v = pv; 830 assert(size == pci_config_size(container_of(pv, PCIDevice, config))); 831 qemu_put_buffer(f, *v, size); 832 833 return 0; 834 } 835 836 static const VMStateInfo vmstate_info_pci_config = { 837 .name = "pci config", 838 .get = get_pci_config_device, 839 .put = 
put_pci_config_device, 840 }; 841 842 static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size, 843 const VMStateField *field) 844 { 845 PCIDevice *s = container_of(pv, PCIDevice, irq_state); 846 uint32_t irq_state[PCI_NUM_PINS]; 847 int i; 848 for (i = 0; i < PCI_NUM_PINS; ++i) { 849 irq_state[i] = qemu_get_be32(f); 850 if (irq_state[i] != 0x1 && irq_state[i] != 0) { 851 fprintf(stderr, "irq state %d: must be 0 or 1.\n", 852 irq_state[i]); 853 return -EINVAL; 854 } 855 } 856 857 for (i = 0; i < PCI_NUM_PINS; ++i) { 858 pci_set_irq_state(s, i, irq_state[i]); 859 } 860 861 return 0; 862 } 863 864 static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size, 865 const VMStateField *field, JSONWriter *vmdesc) 866 { 867 int i; 868 PCIDevice *s = container_of(pv, PCIDevice, irq_state); 869 870 for (i = 0; i < PCI_NUM_PINS; ++i) { 871 qemu_put_be32(f, pci_irq_state(s, i)); 872 } 873 874 return 0; 875 } 876 877 static const VMStateInfo vmstate_info_pci_irq_state = { 878 .name = "pci irq state", 879 .get = get_pci_irq_state, 880 .put = put_pci_irq_state, 881 }; 882 883 static bool migrate_is_pcie(void *opaque, int version_id) 884 { 885 return pci_is_express((PCIDevice *)opaque); 886 } 887 888 static bool migrate_is_not_pcie(void *opaque, int version_id) 889 { 890 return !pci_is_express((PCIDevice *)opaque); 891 } 892 893 static int pci_post_load(void *opaque, int version_id) 894 { 895 pcie_sriov_pf_post_load(opaque); 896 return 0; 897 } 898 899 const VMStateDescription vmstate_pci_device = { 900 .name = "PCIDevice", 901 .version_id = 2, 902 .minimum_version_id = 1, 903 .post_load = pci_post_load, 904 .fields = (const VMStateField[]) { 905 VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice), 906 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, 907 migrate_is_not_pcie, 908 0, vmstate_info_pci_config, 909 PCI_CONFIG_SPACE_SIZE), 910 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, 911 migrate_is_pcie, 912 0, vmstate_info_pci_config, 913 PCIE_CONFIG_SPACE_SIZE), 914 VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2, 915 vmstate_info_pci_irq_state, 916 PCI_NUM_PINS * sizeof(int32_t)), 917 VMSTATE_END_OF_LIST() 918 } 919 }; 920 921 922 void pci_device_save(PCIDevice *s, QEMUFile *f) 923 { 924 Error *local_err = NULL; 925 int ret; 926 927 /* Clear interrupt status bit: it is implicit 928 * in irq_state which we are saving. 929 * This makes us compatible with old devices 930 * which never set or clear this bit. */ 931 s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; 932 ret = vmstate_save_state(f, &vmstate_pci_device, s, NULL, &local_err); 933 if (ret < 0) { 934 error_report_err(local_err); 935 } 936 /* Restore the interrupt status bit. */ 937 pci_update_irq_status(s); 938 } 939 940 int pci_device_load(PCIDevice *s, QEMUFile *f) 941 { 942 Error *local_err = NULL; 943 int ret; 944 945 ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id, 946 &local_err); 947 if (ret < 0) { 948 error_report_err(local_err); 949 } 950 /* Restore the interrupt status bit. 
*/ 951 pci_update_irq_status(s); 952 return ret; 953 } 954 955 static void pci_set_default_subsystem_id(PCIDevice *pci_dev) 956 { 957 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, 958 pci_default_sub_vendor_id); 959 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 960 pci_default_sub_device_id); 961 } 962 963 /* 964 * Parse [[<domain>:]<bus>:]<slot>, return -1 on error if funcp == NULL 965 * [[<domain>:]<bus>:]<slot>.<func>, return -1 on error 966 */ 967 static int pci_parse_devaddr(const char *addr, int *domp, int *busp, 968 unsigned int *slotp, unsigned int *funcp) 969 { 970 const char *p; 971 char *e; 972 unsigned long val; 973 unsigned long dom = 0, bus = 0; 974 unsigned int slot = 0; 975 unsigned int func = 0; 976 977 p = addr; 978 val = strtoul(p, &e, 16); 979 if (e == p) 980 return -1; 981 if (*e == ':') { 982 bus = val; 983 p = e + 1; 984 val = strtoul(p, &e, 16); 985 if (e == p) 986 return -1; 987 if (*e == ':') { 988 dom = bus; 989 bus = val; 990 p = e + 1; 991 val = strtoul(p, &e, 16); 992 if (e == p) 993 return -1; 994 } 995 } 996 997 slot = val; 998 999 if (funcp != NULL && *e != '\0') { 1000 if (*e != '.') { 1001 return -1; 1002 } 1003 p = e + 1; 1004 val = strtoul(p, &e, 16); 1005 if (e == p) { 1006 return -1; 1007 } 1008 1009 func = val; 1010 } 1011 1012 /* if funcp == NULL func is 0 */ 1013 if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7) 1014 return -1; 1015 1016 if (*e) 1017 return -1; 1018 1019 *domp = dom; 1020 *busp = bus; 1021 *slotp = slot; 1022 if (funcp != NULL) 1023 *funcp = func; 1024 return 0; 1025 } 1026 1027 static void pci_init_cmask(PCIDevice *dev) 1028 { 1029 pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff); 1030 pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff); 1031 dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST; 1032 dev->cmask[PCI_REVISION_ID] = 0xff; 1033 dev->cmask[PCI_CLASS_PROG] = 0xff; 1034 pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff); 1035 dev->cmask[PCI_HEADER_TYPE] = 0xff; 1036 dev->cmask[PCI_CAPABILITY_LIST] = 0xff; 1037 } 1038 1039 static void pci_init_wmask(PCIDevice *dev) 1040 { 1041 int config_size = pci_config_size(dev); 1042 1043 dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff; 1044 dev->wmask[PCI_INTERRUPT_LINE] = 0xff; 1045 pci_set_word(dev->wmask + PCI_COMMAND, 1046 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | 1047 PCI_COMMAND_INTX_DISABLE); 1048 pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR); 1049 1050 memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff, 1051 config_size - PCI_CONFIG_HEADER_SIZE); 1052 } 1053 1054 static void pci_init_w1cmask(PCIDevice *dev) 1055 { 1056 /* 1057 * Note: It's okay to set w1cmask even for readonly bits as 1058 * long as their value is hardwired to 0. 
1059 */ 1060 pci_set_word(dev->w1cmask + PCI_STATUS, 1061 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | 1062 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | 1063 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY); 1064 } 1065 1066 static void pci_init_mask_bridge(PCIDevice *d) 1067 { 1068 /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and 1069 PCI_SEC_LATENCY_TIMER */ 1070 memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4); 1071 1072 /* base and limit */ 1073 d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff; 1074 d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff; 1075 pci_set_word(d->wmask + PCI_MEMORY_BASE, 1076 PCI_MEMORY_RANGE_MASK & 0xffff); 1077 pci_set_word(d->wmask + PCI_MEMORY_LIMIT, 1078 PCI_MEMORY_RANGE_MASK & 0xffff); 1079 pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE, 1080 PCI_PREF_RANGE_MASK & 0xffff); 1081 pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT, 1082 PCI_PREF_RANGE_MASK & 0xffff); 1083 1084 /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */ 1085 memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8); 1086 1087 /* Supported memory and i/o types */ 1088 d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16; 1089 d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16; 1090 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE, 1091 PCI_PREF_RANGE_TYPE_64); 1092 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT, 1093 PCI_PREF_RANGE_TYPE_64); 1094 1095 /* 1096 * TODO: Bridges default to 10-bit VGA decoding but we currently only 1097 * implement 16-bit decoding (no alias support). 1098 */ 1099 pci_set_word(d->wmask + PCI_BRIDGE_CONTROL, 1100 PCI_BRIDGE_CTL_PARITY | 1101 PCI_BRIDGE_CTL_SERR | 1102 PCI_BRIDGE_CTL_ISA | 1103 PCI_BRIDGE_CTL_VGA | 1104 PCI_BRIDGE_CTL_VGA_16BIT | 1105 PCI_BRIDGE_CTL_MASTER_ABORT | 1106 PCI_BRIDGE_CTL_BUS_RESET | 1107 PCI_BRIDGE_CTL_FAST_BACK | 1108 PCI_BRIDGE_CTL_DISCARD | 1109 PCI_BRIDGE_CTL_SEC_DISCARD | 1110 PCI_BRIDGE_CTL_DISCARD_SERR); 1111 /* Below does not do anything as we never set this bit, put here for 1112 * completeness. */ 1113 pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL, 1114 PCI_BRIDGE_CTL_DISCARD_STATUS); 1115 d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK; 1116 d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK; 1117 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE, 1118 PCI_PREF_RANGE_TYPE_MASK); 1119 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT, 1120 PCI_PREF_RANGE_TYPE_MASK); 1121 } 1122 1123 static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp) 1124 { 1125 uint8_t slot = PCI_SLOT(dev->devfn); 1126 uint8_t func; 1127 1128 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 1129 dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; 1130 } 1131 1132 /* SR/IOV is not handled here. */ 1133 if (pci_is_vf(dev)) { 1134 return; 1135 } 1136 1137 /* 1138 * multifunction bit is interpreted in two ways as follows. 1139 * - all functions must set the bit to 1. 1140 * Example: Intel X53 1141 * - function 0 must set the bit, but the rest function (> 0) 1142 * is allowed to leave the bit to 0. 1143 * Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10, 1144 * 1145 * So OS (at least Linux) checks the bit of only function 0, 1146 * and doesn't see the bit of function > 0. 1147 * 1148 * The below check allows both interpretation. 
1149 */ 1150 if (PCI_FUNC(dev->devfn)) { 1151 PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)]; 1152 if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) { 1153 /* function 0 should set multifunction bit */ 1154 error_setg(errp, "PCI: single function device can't be populated " 1155 "in function %x.%x", slot, PCI_FUNC(dev->devfn)); 1156 return; 1157 } 1158 return; 1159 } 1160 1161 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { 1162 return; 1163 } 1164 /* function 0 indicates single function, so function > 0 must be NULL */ 1165 for (func = 1; func < PCI_FUNC_MAX; ++func) { 1166 PCIDevice *device = bus->devices[PCI_DEVFN(slot, func)]; 1167 if (device && !pci_is_vf(device)) { 1168 error_setg(errp, "PCI: %x.0 indicates single function, " 1169 "but %x.%x is already populated.", 1170 slot, slot, func); 1171 return; 1172 } 1173 } 1174 } 1175 1176 static void pci_config_alloc(PCIDevice *pci_dev) 1177 { 1178 int config_size = pci_config_size(pci_dev); 1179 1180 pci_dev->config = g_malloc0(config_size); 1181 pci_dev->cmask = g_malloc0(config_size); 1182 pci_dev->wmask = g_malloc0(config_size); 1183 pci_dev->w1cmask = g_malloc0(config_size); 1184 pci_dev->used = g_malloc0(config_size); 1185 } 1186 1187 static void pci_config_free(PCIDevice *pci_dev) 1188 { 1189 g_free(pci_dev->config); 1190 g_free(pci_dev->cmask); 1191 g_free(pci_dev->wmask); 1192 g_free(pci_dev->w1cmask); 1193 g_free(pci_dev->used); 1194 } 1195 1196 static void do_pci_unregister_device(PCIDevice *pci_dev) 1197 { 1198 pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL; 1199 pci_config_free(pci_dev); 1200 1201 if (xen_mode == XEN_EMULATE) { 1202 xen_evtchn_remove_pci_device(pci_dev); 1203 } 1204 if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) { 1205 memory_region_del_subregion(&pci_dev->bus_master_container_region, 1206 &pci_dev->bus_master_enable_region); 1207 } 1208 address_space_destroy(&pci_dev->bus_master_as); 1209 } 1210 1211 /* Extract PCIReqIDCache into BDF format */ 1212 static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache) 1213 { 1214 uint8_t bus_n; 1215 uint16_t result; 1216 1217 switch (cache->type) { 1218 case PCI_REQ_ID_BDF: 1219 result = pci_get_bdf(cache->dev); 1220 break; 1221 case PCI_REQ_ID_SECONDARY_BUS: 1222 bus_n = pci_dev_bus_num(cache->dev); 1223 result = PCI_BUILD_BDF(bus_n, 0); 1224 break; 1225 default: 1226 error_report("Invalid PCI requester ID cache type: %d", 1227 cache->type); 1228 exit(1); 1229 break; 1230 } 1231 1232 return result; 1233 } 1234 1235 /* Parse bridges up to the root complex and return requester ID 1236 * cache for specific device. For full PCIe topology, the cache 1237 * result would be exactly the same as getting BDF of the device. 1238 * However, several tricks are required when system mixed up with 1239 * legacy PCI devices and PCIe-to-PCI bridges. 1240 * 1241 * Here we cache the proxy device (and type) not requester ID since 1242 * bus number might change from time to time. 
1243 */ 1244 static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev) 1245 { 1246 PCIDevice *parent; 1247 PCIReqIDCache cache = { 1248 .dev = dev, 1249 .type = PCI_REQ_ID_BDF, 1250 }; 1251 1252 while (!pci_bus_is_root(pci_get_bus(dev))) { 1253 /* We are under PCI/PCIe bridges */ 1254 parent = pci_get_bus(dev)->parent_dev; 1255 if (pci_is_express(parent)) { 1256 if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) { 1257 /* When we pass through PCIe-to-PCI/PCIX bridges, we 1258 * override the requester ID using secondary bus 1259 * number of parent bridge with zeroed devfn 1260 * (pcie-to-pci bridge spec chap 2.3). */ 1261 cache.type = PCI_REQ_ID_SECONDARY_BUS; 1262 cache.dev = dev; 1263 } 1264 } else { 1265 /* Legacy PCI, override requester ID with the bridge's 1266 * BDF upstream. When the root complex connects to 1267 * legacy PCI devices (including buses), it can only 1268 * obtain requester ID info from directly attached 1269 * devices. If devices are attached under bridges, only 1270 * the requester ID of the bridge that is directly 1271 * attached to the root complex can be recognized. */ 1272 cache.type = PCI_REQ_ID_BDF; 1273 cache.dev = parent; 1274 } 1275 dev = parent; 1276 } 1277 1278 return cache; 1279 } 1280 1281 uint16_t pci_requester_id(PCIDevice *dev) 1282 { 1283 return pci_req_id_cache_extract(&dev->requester_id_cache); 1284 } 1285 1286 static bool pci_bus_devfn_available(PCIBus *bus, int devfn) 1287 { 1288 return !(bus->devices[devfn]); 1289 } 1290 1291 static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn) 1292 { 1293 return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn)); 1294 } 1295 1296 uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus) 1297 { 1298 return bus->slot_reserved_mask; 1299 } 1300 1301 void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask) 1302 { 1303 bus->slot_reserved_mask |= mask; 1304 } 1305 1306 void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask) 1307 { 1308 bus->slot_reserved_mask &= ~mask; 1309 } 1310 1311 /* -1 for devfn means auto assign */ 1312 static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, 1313 const char *name, int devfn, 1314 Error **errp) 1315 { 1316 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 1317 PCIConfigReadFunc *config_read = pc->config_read; 1318 PCIConfigWriteFunc *config_write = pc->config_write; 1319 Error *local_err = NULL; 1320 DeviceState *dev = DEVICE(pci_dev); 1321 PCIBus *bus = pci_get_bus(pci_dev); 1322 bool is_bridge = IS_PCI_BRIDGE(pci_dev); 1323 1324 /* Only pci bridges can be attached to extra PCI root buses */ 1325 if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) { 1326 error_setg(errp, 1327 "PCI: Only PCI/PCIe bridges can be plugged into %s", 1328 bus->parent_dev->name); 1329 return NULL; 1330 } 1331 1332 if (devfn < 0) { 1333 for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices); 1334 devfn += PCI_FUNC_MAX) { 1335 if (pci_bus_devfn_available(bus, devfn) && 1336 !pci_bus_devfn_reserved(bus, devfn)) { 1337 goto found; 1338 } 1339 } 1340 error_setg(errp, "PCI: no slot/function available for %s, all in use " 1341 "or reserved", name); 1342 return NULL; 1343 found: ; 1344 } else if (pci_bus_devfn_reserved(bus, devfn)) { 1345 error_setg(errp, "PCI: slot %d function %d not available for %s," 1346 " reserved", 1347 PCI_SLOT(devfn), PCI_FUNC(devfn), name); 1348 return NULL; 1349 } else if (!pci_bus_devfn_available(bus, devfn)) { 1350 error_setg(errp, "PCI: slot %d function %d not available for %s," 1351 " in use by %s,id=%s", 1352 PCI_SLOT(devfn), 
PCI_FUNC(devfn), name, 1353 bus->devices[devfn]->name, bus->devices[devfn]->qdev.id); 1354 return NULL; 1355 } 1356 1357 /* 1358 * Populating function 0 triggers a scan from the guest that 1359 * exposes other non-zero functions. Hence we need to ensure that 1360 * function 0 wasn't added yet. 1361 */ 1362 if (dev->hotplugged && !pci_is_vf(pci_dev) && 1363 pci_get_function_0(pci_dev)) { 1364 error_setg(errp, "PCI: slot %d function 0 already occupied by %s," 1365 " new func %s cannot be exposed to guest.", 1366 PCI_SLOT(pci_get_function_0(pci_dev)->devfn), 1367 pci_get_function_0(pci_dev)->name, 1368 name); 1369 1370 return NULL; 1371 } 1372 1373 pci_dev->devfn = devfn; 1374 pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev); 1375 pstrcpy(pci_dev->name, sizeof(pci_dev->name), name); 1376 1377 memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev), 1378 "bus master container", UINT64_MAX); 1379 address_space_init(&pci_dev->bus_master_as, 1380 &pci_dev->bus_master_container_region, pci_dev->name); 1381 pci_dev->bus_master_as.max_bounce_buffer_size = 1382 pci_dev->max_bounce_buffer_size; 1383 1384 if (phase_check(PHASE_MACHINE_READY)) { 1385 pci_init_bus_master(pci_dev); 1386 } 1387 pci_dev->irq_state = 0; 1388 pci_config_alloc(pci_dev); 1389 1390 pci_config_set_vendor_id(pci_dev->config, pc->vendor_id); 1391 pci_config_set_device_id(pci_dev->config, pc->device_id); 1392 pci_config_set_revision(pci_dev->config, pc->revision); 1393 pci_config_set_class(pci_dev->config, pc->class_id); 1394 1395 if (!is_bridge) { 1396 if (pc->subsystem_vendor_id || pc->subsystem_id) { 1397 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, 1398 pc->subsystem_vendor_id); 1399 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 1400 pc->subsystem_id); 1401 } else { 1402 pci_set_default_subsystem_id(pci_dev); 1403 } 1404 } else { 1405 /* subsystem_vendor_id/subsystem_id are only for header type 0 */ 1406 assert(!pc->subsystem_vendor_id); 1407 assert(!pc->subsystem_id); 1408 } 1409 pci_init_cmask(pci_dev); 1410 pci_init_wmask(pci_dev); 1411 pci_init_w1cmask(pci_dev); 1412 if (is_bridge) { 1413 pci_init_mask_bridge(pci_dev); 1414 } 1415 pci_init_multifunction(bus, pci_dev, &local_err); 1416 if (local_err) { 1417 error_propagate(errp, local_err); 1418 do_pci_unregister_device(pci_dev); 1419 return NULL; 1420 } 1421 1422 if (!config_read) 1423 config_read = pci_default_read_config; 1424 if (!config_write) 1425 config_write = pci_default_write_config; 1426 pci_dev->config_read = config_read; 1427 pci_dev->config_write = config_write; 1428 bus->devices[devfn] = pci_dev; 1429 pci_dev->version_id = 2; /* Current pci device vmstate version */ 1430 return pci_dev; 1431 } 1432 1433 static void pci_unregister_io_regions(PCIDevice *pci_dev) 1434 { 1435 PCIIORegion *r; 1436 int i; 1437 1438 for(i = 0; i < PCI_NUM_REGIONS; i++) { 1439 r = &pci_dev->io_regions[i]; 1440 if (!r->size || r->addr == PCI_BAR_UNMAPPED) 1441 continue; 1442 memory_region_del_subregion(r->address_space, r->memory); 1443 } 1444 1445 pci_unregister_vga(pci_dev); 1446 } 1447 1448 static void pci_qdev_unrealize(DeviceState *dev) 1449 { 1450 PCIDevice *pci_dev = PCI_DEVICE(dev); 1451 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 1452 1453 pci_unregister_io_regions(pci_dev); 1454 pci_del_option_rom(pci_dev); 1455 pcie_sriov_unregister_device(pci_dev); 1456 1457 if (pc->exit) { 1458 pc->exit(pci_dev); 1459 } 1460 1461 pci_device_deassert_intx(pci_dev); 1462 do_pci_unregister_device(pci_dev); 1463 1464 pci_dev->msi_trigger = 
NULL; 1465 1466 /* 1467 * clean up acpi-index so it could reused by another device 1468 */ 1469 if (pci_dev->acpi_index) { 1470 GSequence *used_indexes = pci_acpi_index_list(); 1471 1472 g_sequence_remove(g_sequence_lookup(used_indexes, 1473 GINT_TO_POINTER(pci_dev->acpi_index), 1474 g_cmp_uint32, NULL)); 1475 } 1476 } 1477 1478 void pci_register_bar(PCIDevice *pci_dev, int region_num, 1479 uint8_t type, MemoryRegion *memory) 1480 { 1481 PCIIORegion *r; 1482 uint32_t addr; /* offset in pci config space */ 1483 uint64_t wmask; 1484 pcibus_t size = memory_region_size(memory); 1485 uint8_t hdr_type; 1486 1487 assert(region_num >= 0); 1488 assert(region_num < PCI_NUM_REGIONS); 1489 assert(is_power_of_2(size)); 1490 1491 /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */ 1492 hdr_type = 1493 pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; 1494 assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2); 1495 1496 r = &pci_dev->io_regions[region_num]; 1497 assert(!r->size); 1498 r->size = size; 1499 r->type = type; 1500 r->memory = memory; 1501 r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO 1502 ? pci_get_bus(pci_dev)->address_space_io 1503 : pci_get_bus(pci_dev)->address_space_mem; 1504 1505 if (pci_is_vf(pci_dev)) { 1506 r->addr = pci_bar_address(pci_dev, region_num, r->type, r->size); 1507 if (r->addr != PCI_BAR_UNMAPPED) { 1508 memory_region_add_subregion_overlap(r->address_space, 1509 r->addr, r->memory, 1); 1510 } 1511 } else { 1512 r->addr = PCI_BAR_UNMAPPED; 1513 1514 wmask = ~(size - 1); 1515 if (region_num == PCI_ROM_SLOT) { 1516 /* ROM enable bit is writable */ 1517 wmask |= PCI_ROM_ADDRESS_ENABLE; 1518 } 1519 1520 addr = pci_bar(pci_dev, region_num); 1521 pci_set_long(pci_dev->config + addr, type); 1522 1523 if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) && 1524 r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 1525 pci_set_quad(pci_dev->wmask + addr, wmask); 1526 pci_set_quad(pci_dev->cmask + addr, ~0ULL); 1527 } else { 1528 pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff); 1529 pci_set_long(pci_dev->cmask + addr, 0xffffffff); 1530 } 1531 } 1532 } 1533 1534 static void pci_update_vga(PCIDevice *pci_dev) 1535 { 1536 uint16_t cmd; 1537 1538 if (!pci_dev->has_vga) { 1539 return; 1540 } 1541 1542 cmd = pci_get_word(pci_dev->config + PCI_COMMAND); 1543 1544 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM], 1545 cmd & PCI_COMMAND_MEMORY); 1546 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO], 1547 cmd & PCI_COMMAND_IO); 1548 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI], 1549 cmd & PCI_COMMAND_IO); 1550 } 1551 1552 void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem, 1553 MemoryRegion *io_lo, MemoryRegion *io_hi) 1554 { 1555 PCIBus *bus = pci_get_bus(pci_dev); 1556 1557 assert(!pci_dev->has_vga); 1558 1559 assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE); 1560 pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem; 1561 memory_region_add_subregion_overlap(bus->address_space_mem, 1562 QEMU_PCI_VGA_MEM_BASE, mem, 1); 1563 1564 assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE); 1565 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo; 1566 memory_region_add_subregion_overlap(bus->address_space_io, 1567 QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1); 1568 1569 assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE); 1570 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi; 1571 memory_region_add_subregion_overlap(bus->address_space_io, 1572 QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1); 1573 
pci_dev->has_vga = true; 1574 1575 pci_update_vga(pci_dev); 1576 } 1577 1578 void pci_unregister_vga(PCIDevice *pci_dev) 1579 { 1580 PCIBus *bus = pci_get_bus(pci_dev); 1581 1582 if (!pci_dev->has_vga) { 1583 return; 1584 } 1585 1586 memory_region_del_subregion(bus->address_space_mem, 1587 pci_dev->vga_regions[QEMU_PCI_VGA_MEM]); 1588 memory_region_del_subregion(bus->address_space_io, 1589 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]); 1590 memory_region_del_subregion(bus->address_space_io, 1591 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]); 1592 pci_dev->has_vga = false; 1593 } 1594 1595 pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num) 1596 { 1597 return pci_dev->io_regions[region_num].addr; 1598 } 1599 1600 static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg, 1601 uint8_t type, pcibus_t size) 1602 { 1603 pcibus_t new_addr; 1604 if (!pci_is_vf(d)) { 1605 int bar = pci_bar(d, reg); 1606 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 1607 new_addr = pci_get_quad(d->config + bar); 1608 } else { 1609 new_addr = pci_get_long(d->config + bar); 1610 } 1611 } else { 1612 PCIDevice *pf = d->exp.sriov_vf.pf; 1613 uint16_t sriov_cap = pf->exp.sriov_cap; 1614 int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4; 1615 uint16_t vf_offset = 1616 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET); 1617 uint16_t vf_stride = 1618 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE); 1619 uint32_t vf_num = d->devfn - (pf->devfn + vf_offset); 1620 1621 if (vf_num) { 1622 vf_num /= vf_stride; 1623 } 1624 1625 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { 1626 new_addr = pci_get_quad(pf->config + bar); 1627 } else { 1628 new_addr = pci_get_long(pf->config + bar); 1629 } 1630 new_addr += vf_num * size; 1631 } 1632 /* The ROM slot has a specific enable bit, keep it intact */ 1633 if (reg != PCI_ROM_SLOT) { 1634 new_addr &= ~(size - 1); 1635 } 1636 return new_addr; 1637 } 1638 1639 pcibus_t pci_bar_address(PCIDevice *d, 1640 int reg, uint8_t type, pcibus_t size) 1641 { 1642 pcibus_t new_addr, last_addr; 1643 uint16_t cmd = pci_get_word(d->config + PCI_COMMAND); 1644 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); 1645 bool allow_0_address = mc->pci_allow_0_address; 1646 1647 if (type & PCI_BASE_ADDRESS_SPACE_IO) { 1648 if (!(cmd & PCI_COMMAND_IO)) { 1649 return PCI_BAR_UNMAPPED; 1650 } 1651 new_addr = pci_config_get_bar_addr(d, reg, type, size); 1652 last_addr = new_addr + size - 1; 1653 /* Check if 32 bit BAR wraps around explicitly. 1654 * TODO: make priorities correct and remove this work around. 1655 */ 1656 if (last_addr <= new_addr || last_addr >= UINT32_MAX || 1657 (!allow_0_address && new_addr == 0)) { 1658 return PCI_BAR_UNMAPPED; 1659 } 1660 return new_addr; 1661 } 1662 1663 if (!(cmd & PCI_COMMAND_MEMORY)) { 1664 return PCI_BAR_UNMAPPED; 1665 } 1666 new_addr = pci_config_get_bar_addr(d, reg, type, size); 1667 /* the ROM slot has a specific enable bit */ 1668 if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) { 1669 return PCI_BAR_UNMAPPED; 1670 } 1671 new_addr &= ~(size - 1); 1672 last_addr = new_addr + size - 1; 1673 /* NOTE: we do not support wrapping */ 1674 /* XXX: as we cannot support really dynamic 1675 mappings, we handle specific values as invalid 1676 mappings. */ 1677 if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED || 1678 (!allow_0_address && new_addr == 0)) { 1679 return PCI_BAR_UNMAPPED; 1680 } 1681 1682 /* Now pcibus_t is 64bit. 1683 * Check if 32 bit BAR wraps around explicitly. 1684 * Without this, PC ide doesn't work well. 
1685 * TODO: remove this work around. 1686 */ 1687 if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) { 1688 return PCI_BAR_UNMAPPED; 1689 } 1690 1691 /* 1692 * OS is allowed to set BAR beyond its addressable 1693 * bits. For example, 32 bit OS can set 64bit bar 1694 * to >4G. Check it. TODO: we might need to support 1695 * it in the future for e.g. PAE. 1696 */ 1697 if (last_addr >= HWADDR_MAX) { 1698 return PCI_BAR_UNMAPPED; 1699 } 1700 1701 return new_addr; 1702 } 1703 1704 static void pci_update_mappings(PCIDevice *d) 1705 { 1706 PCIIORegion *r; 1707 int i; 1708 pcibus_t new_addr; 1709 1710 for(i = 0; i < PCI_NUM_REGIONS; i++) { 1711 r = &d->io_regions[i]; 1712 1713 /* this region isn't registered */ 1714 if (!r->size) 1715 continue; 1716 1717 new_addr = pci_bar_address(d, i, r->type, r->size); 1718 if (!d->enabled || pci_pm_state(d)) { 1719 new_addr = PCI_BAR_UNMAPPED; 1720 } 1721 1722 /* This bar isn't changed */ 1723 if (new_addr == r->addr) 1724 continue; 1725 1726 /* now do the real mapping */ 1727 if (r->addr != PCI_BAR_UNMAPPED) { 1728 trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d), 1729 PCI_SLOT(d->devfn), 1730 PCI_FUNC(d->devfn), 1731 i, r->addr, r->size); 1732 memory_region_del_subregion(r->address_space, r->memory); 1733 } 1734 r->addr = new_addr; 1735 if (r->addr != PCI_BAR_UNMAPPED) { 1736 trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d), 1737 PCI_SLOT(d->devfn), 1738 PCI_FUNC(d->devfn), 1739 i, r->addr, r->size); 1740 memory_region_add_subregion_overlap(r->address_space, 1741 r->addr, r->memory, 1); 1742 } 1743 } 1744 1745 pci_update_vga(d); 1746 } 1747 1748 int pci_irq_disabled(PCIDevice *d) 1749 { 1750 return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE; 1751 } 1752 1753 /* Called after interrupt disabled field update in config space, 1754 * assert/deassert interrupts if necessary. 1755 * Gets original interrupt disable bit value (before update). */ 1756 static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled) 1757 { 1758 int i, disabled = pci_irq_disabled(d); 1759 if (disabled == was_irq_disabled) 1760 return; 1761 for (i = 0; i < PCI_NUM_PINS; ++i) { 1762 int state = pci_irq_state(d, i); 1763 pci_change_irq_level(d, i, disabled ? 
-state : state); 1764 } 1765 } 1766 1767 uint32_t pci_default_read_config(PCIDevice *d, 1768 uint32_t address, int len) 1769 { 1770 uint32_t val = 0; 1771 1772 assert(address + len <= pci_config_size(d)); 1773 1774 if (pci_is_express_downstream_port(d) && 1775 ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) { 1776 pcie_sync_bridge_lnk(d); 1777 } 1778 memcpy(&val, d->config + address, len); 1779 return le32_to_cpu(val); 1780 } 1781 1782 void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l) 1783 { 1784 uint8_t new_pm_state, old_pm_state = pci_pm_state(d); 1785 int i, was_irq_disabled = pci_irq_disabled(d); 1786 uint32_t val = val_in; 1787 1788 assert(addr + l <= pci_config_size(d)); 1789 1790 for (i = 0; i < l; val >>= 8, ++i) { 1791 uint8_t wmask = d->wmask[addr + i]; 1792 uint8_t w1cmask = d->w1cmask[addr + i]; 1793 assert(!(wmask & w1cmask)); 1794 d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask); 1795 d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ 1796 } 1797 1798 new_pm_state = pci_pm_update(d, addr, l, old_pm_state); 1799 1800 if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) || 1801 ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) || 1802 ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) || 1803 range_covers_byte(addr, l, PCI_COMMAND) || 1804 !!new_pm_state != !!old_pm_state) { 1805 pci_update_mappings(d); 1806 } 1807 1808 if (ranges_overlap(addr, l, PCI_COMMAND, 2)) { 1809 pci_update_irq_disabled(d, was_irq_disabled); 1810 pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND) & 1811 PCI_COMMAND_MASTER) && d->enabled); 1812 } 1813 1814 msi_write_config(d, addr, val_in, l); 1815 msix_write_config(d, addr, val_in, l); 1816 pcie_sriov_config_write(d, addr, val_in, l); 1817 } 1818 1819 /***********************************************************/ 1820 /* generic PCI irq support */ 1821 1822 /* 0 <= irq_num <= 3. level must be 0 or 1 */ 1823 static void pci_irq_handler(void *opaque, int irq_num, int level) 1824 { 1825 PCIDevice *pci_dev = opaque; 1826 int change; 1827 1828 assert(0 <= irq_num && irq_num < PCI_NUM_PINS); 1829 assert(level == 0 || level == 1); 1830 change = level - pci_irq_state(pci_dev, irq_num); 1831 if (!change) 1832 return; 1833 1834 pci_set_irq_state(pci_dev, irq_num, level); 1835 pci_update_irq_status(pci_dev); 1836 if (pci_irq_disabled(pci_dev)) 1837 return; 1838 pci_change_irq_level(pci_dev, irq_num, change); 1839 } 1840 1841 qemu_irq pci_allocate_irq(PCIDevice *pci_dev) 1842 { 1843 int intx = pci_intx(pci_dev); 1844 assert(0 <= intx && intx < PCI_NUM_PINS); 1845 1846 return qemu_allocate_irq(pci_irq_handler, pci_dev, intx); 1847 } 1848 1849 void pci_set_irq(PCIDevice *pci_dev, int level) 1850 { 1851 int intx = pci_intx(pci_dev); 1852 pci_irq_handler(pci_dev, intx, level); 1853 } 1854 1855 /* Special hooks used by device assignment */ 1856 void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq) 1857 { 1858 assert(pci_bus_is_root(bus)); 1859 bus->route_intx_to_irq = route_intx_to_irq; 1860 } 1861 1862 PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin) 1863 { 1864 PCIBus *bus; 1865 1866 do { 1867 int dev_irq = pin; 1868 bus = pci_get_bus(dev); 1869 pin = bus->map_irq(dev, pin); 1870 trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin, 1871 pci_bus_is_root(bus) ? 
"root-complex" 1872 : DEVICE(bus->parent_dev)->canonical_path); 1873 dev = bus->parent_dev; 1874 } while (dev); 1875 1876 if (!bus->route_intx_to_irq) { 1877 error_report("PCI: Bug - unimplemented PCI INTx routing (%s)", 1878 object_get_typename(OBJECT(bus->qbus.parent))); 1879 return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 }; 1880 } 1881 1882 return bus->route_intx_to_irq(bus->irq_opaque, pin); 1883 } 1884 1885 bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new) 1886 { 1887 return old->mode != new->mode || old->irq != new->irq; 1888 } 1889 1890 void pci_bus_fire_intx_routing_notifier(PCIBus *bus) 1891 { 1892 PCIDevice *dev; 1893 PCIBus *sec; 1894 int i; 1895 1896 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 1897 dev = bus->devices[i]; 1898 if (dev && dev->intx_routing_notifier) { 1899 dev->intx_routing_notifier(dev); 1900 } 1901 } 1902 1903 QLIST_FOREACH(sec, &bus->child, sibling) { 1904 pci_bus_fire_intx_routing_notifier(sec); 1905 } 1906 } 1907 1908 void pci_device_set_intx_routing_notifier(PCIDevice *dev, 1909 PCIINTxRoutingNotifier notifier) 1910 { 1911 dev->intx_routing_notifier = notifier; 1912 } 1913 1914 /* 1915 * PCI-to-PCI bridge specification 1916 * 9.1: Interrupt routing. Table 9-1 1917 * 1918 * the PCI Express Base Specification, Revision 2.1 1919 * 2.2.8.1: INTx interrupt signaling - Rules 1920 * the Implementation Note 1921 * Table 2-20 1922 */ 1923 /* 1924 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD 1925 * 0-origin unlike PCI interrupt pin register. 1926 */ 1927 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) 1928 { 1929 return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin); 1930 } 1931 1932 /***********************************************************/ 1933 /* monitor info on PCI */ 1934 1935 static const pci_class_desc pci_class_descriptions[] = 1936 { 1937 { 0x0001, "VGA controller", "display"}, 1938 { 0x0100, "SCSI controller", "scsi"}, 1939 { 0x0101, "IDE controller", "ide"}, 1940 { 0x0102, "Floppy controller", "fdc"}, 1941 { 0x0103, "IPI controller", "ipi"}, 1942 { 0x0104, "RAID controller", "raid"}, 1943 { 0x0106, "SATA controller"}, 1944 { 0x0107, "SAS controller"}, 1945 { 0x0180, "Storage controller"}, 1946 { 0x0200, "Ethernet controller", "ethernet"}, 1947 { 0x0201, "Token Ring controller", "token-ring"}, 1948 { 0x0202, "FDDI controller", "fddi"}, 1949 { 0x0203, "ATM controller", "atm"}, 1950 { 0x0280, "Network controller"}, 1951 { 0x0300, "VGA controller", "display", 0x00ff}, 1952 { 0x0301, "XGA controller"}, 1953 { 0x0302, "3D controller"}, 1954 { 0x0380, "Display controller"}, 1955 { 0x0400, "Video controller", "video"}, 1956 { 0x0401, "Audio controller", "sound"}, 1957 { 0x0402, "Phone"}, 1958 { 0x0403, "Audio controller", "sound"}, 1959 { 0x0480, "Multimedia controller"}, 1960 { 0x0500, "RAM controller", "memory"}, 1961 { 0x0501, "Flash controller", "flash"}, 1962 { 0x0580, "Memory controller"}, 1963 { 0x0600, "Host bridge", "host"}, 1964 { 0x0601, "ISA bridge", "isa"}, 1965 { 0x0602, "EISA bridge", "eisa"}, 1966 { 0x0603, "MC bridge", "mca"}, 1967 { 0x0604, "PCI bridge", "pci-bridge"}, 1968 { 0x0605, "PCMCIA bridge", "pcmcia"}, 1969 { 0x0606, "NUBUS bridge", "nubus"}, 1970 { 0x0607, "CARDBUS bridge", "cardbus"}, 1971 { 0x0608, "RACEWAY bridge"}, 1972 { 0x0680, "Bridge"}, 1973 { 0x0700, "Serial port", "serial"}, 1974 { 0x0701, "Parallel port", "parallel"}, 1975 { 0x0800, "Interrupt controller", "interrupt-controller"}, 1976 { 0x0801, "DMA controller", "dma-controller"}, 1977 { 0x0802, "Timer", "timer"}, 1978 { 0x0803, "RTC", 
"rtc"}, 1979 { 0x0900, "Keyboard", "keyboard"}, 1980 { 0x0901, "Pen", "pen"}, 1981 { 0x0902, "Mouse", "mouse"}, 1982 { 0x0A00, "Dock station", "dock", 0x00ff}, 1983 { 0x0B00, "i386 cpu", "cpu", 0x00ff}, 1984 { 0x0c00, "Firewire controller", "firewire"}, 1985 { 0x0c01, "Access bus controller", "access-bus"}, 1986 { 0x0c02, "SSA controller", "ssa"}, 1987 { 0x0c03, "USB controller", "usb"}, 1988 { 0x0c04, "Fibre channel controller", "fibre-channel"}, 1989 { 0x0c05, "SMBus"}, 1990 { 0, NULL} 1991 }; 1992 1993 void pci_for_each_device_under_bus_reverse(PCIBus *bus, 1994 pci_bus_dev_fn fn, 1995 void *opaque) 1996 { 1997 PCIDevice *d; 1998 int devfn; 1999 2000 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 2001 d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn]; 2002 if (d) { 2003 fn(bus, d, opaque); 2004 } 2005 } 2006 } 2007 2008 void pci_for_each_device_reverse(PCIBus *bus, int bus_num, 2009 pci_bus_dev_fn fn, void *opaque) 2010 { 2011 bus = pci_find_bus_nr(bus, bus_num); 2012 2013 if (bus) { 2014 pci_for_each_device_under_bus_reverse(bus, fn, opaque); 2015 } 2016 } 2017 2018 void pci_for_each_device_under_bus(PCIBus *bus, 2019 pci_bus_dev_fn fn, void *opaque) 2020 { 2021 PCIDevice *d; 2022 int devfn; 2023 2024 for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { 2025 d = bus->devices[devfn]; 2026 if (d) { 2027 fn(bus, d, opaque); 2028 } 2029 } 2030 } 2031 2032 void pci_for_each_device(PCIBus *bus, int bus_num, 2033 pci_bus_dev_fn fn, void *opaque) 2034 { 2035 bus = pci_find_bus_nr(bus, bus_num); 2036 2037 if (bus) { 2038 pci_for_each_device_under_bus(bus, fn, opaque); 2039 } 2040 } 2041 2042 const pci_class_desc *get_class_desc(int class) 2043 { 2044 const pci_class_desc *desc; 2045 2046 desc = pci_class_descriptions; 2047 while (desc->desc && class != desc->class) { 2048 desc++; 2049 } 2050 2051 return desc; 2052 } 2053 2054 void pci_init_nic_devices(PCIBus *bus, const char *default_model) 2055 { 2056 qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model, 2057 "virtio", "virtio-net-pci"); 2058 } 2059 2060 bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model, 2061 const char *alias, const char *devaddr) 2062 { 2063 NICInfo *nd = qemu_find_nic_info(model, true, alias); 2064 int dom, busnr, devfn; 2065 PCIDevice *pci_dev; 2066 unsigned slot; 2067 unsigned func; 2068 2069 PCIBus *bus; 2070 2071 if (!nd) { 2072 return false; 2073 } 2074 2075 if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, &func) < 0) { 2076 error_report("Invalid PCI device address %s for device %s", 2077 devaddr, model); 2078 exit(1); 2079 } 2080 2081 if (dom != 0) { 2082 error_report("No support for non-zero PCI domains"); 2083 exit(1); 2084 } 2085 2086 devfn = PCI_DEVFN(slot, func); 2087 2088 bus = pci_find_bus_nr(rootbus, busnr); 2089 if (!bus) { 2090 error_report("Invalid PCI device address %s for device %s", 2091 devaddr, model); 2092 exit(1); 2093 } 2094 2095 pci_dev = pci_new(devfn, model); 2096 qdev_set_nic_properties(&pci_dev->qdev, nd); 2097 pci_realize_and_unref(pci_dev, bus, &error_fatal); 2098 return true; 2099 } 2100 2101 PCIDevice *pci_vga_init(PCIBus *bus) 2102 { 2103 vga_interface_created = true; 2104 switch (vga_interface_type) { 2105 case VGA_CIRRUS: 2106 return pci_create_simple(bus, -1, "cirrus-vga"); 2107 case VGA_QXL: 2108 return pci_create_simple(bus, -1, "qxl-vga"); 2109 case VGA_STD: 2110 return pci_create_simple(bus, -1, "VGA"); 2111 case VGA_VMWARE: 2112 return pci_create_simple(bus, -1, "vmware-svga"); 2113 case VGA_VIRTIO: 2114 return 
pci_create_simple(bus, -1, "virtio-vga"); 2115 case VGA_NONE: 2116 default: /* Other non-PCI types. Checking for unsupported types is already 2117 done in vl.c. */ 2118 return NULL; 2119 } 2120 } 2121 2122 /* Whether a given bus number is in range of the secondary 2123 * bus of the given bridge device. */ 2124 static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num) 2125 { 2126 return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) & 2127 PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ && 2128 dev->config[PCI_SECONDARY_BUS] <= bus_num && 2129 bus_num <= dev->config[PCI_SUBORDINATE_BUS]; 2130 } 2131 2132 /* Whether a given bus number is in a range of a root bus */ 2133 static bool pci_root_bus_in_range(PCIBus *bus, int bus_num) 2134 { 2135 int i; 2136 2137 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { 2138 PCIDevice *dev = bus->devices[i]; 2139 2140 if (dev && IS_PCI_BRIDGE(dev)) { 2141 if (pci_secondary_bus_in_range(dev, bus_num)) { 2142 return true; 2143 } 2144 } 2145 } 2146 2147 return false; 2148 } 2149 2150 PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num) 2151 { 2152 PCIBus *sec; 2153 2154 if (!bus) { 2155 return NULL; 2156 } 2157 2158 if (pci_bus_num(bus) == bus_num) { 2159 return bus; 2160 } 2161 2162 /* Consider all bus numbers in range for the host pci bridge. */ 2163 if (!pci_bus_is_root(bus) && 2164 !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) { 2165 return NULL; 2166 } 2167 2168 /* try child bus */ 2169 for (; bus; bus = sec) { 2170 QLIST_FOREACH(sec, &bus->child, sibling) { 2171 if (pci_bus_num(sec) == bus_num) { 2172 return sec; 2173 } 2174 /* PXB buses assumed to be children of bus 0 */ 2175 if (pci_bus_is_root(sec)) { 2176 if (pci_root_bus_in_range(sec, bus_num)) { 2177 break; 2178 } 2179 } else { 2180 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) { 2181 break; 2182 } 2183 } 2184 } 2185 } 2186 2187 return NULL; 2188 } 2189 2190 void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, 2191 pci_bus_fn end, void *parent_state) 2192 { 2193 PCIBus *sec; 2194 void *state; 2195 2196 if (!bus) { 2197 return; 2198 } 2199 2200 if (begin) { 2201 state = begin(bus, parent_state); 2202 } else { 2203 state = parent_state; 2204 } 2205 2206 QLIST_FOREACH(sec, &bus->child, sibling) { 2207 pci_for_each_bus_depth_first(sec, begin, end, state); 2208 } 2209 2210 if (end) { 2211 end(bus, state); 2212 } 2213 } 2214 2215 2216 PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn) 2217 { 2218 bus = pci_find_bus_nr(bus, bus_num); 2219 2220 if (!bus) 2221 return NULL; 2222 2223 return bus->devices[devfn]; 2224 } 2225 2226 #define ONBOARD_INDEX_MAX (16 * 1024 - 1) 2227 2228 static void pci_qdev_realize(DeviceState *qdev, Error **errp) 2229 { 2230 PCIDevice *pci_dev = (PCIDevice *)qdev; 2231 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); 2232 ObjectClass *klass = OBJECT_CLASS(pc); 2233 Error *local_err = NULL; 2234 bool is_default_rom; 2235 uint16_t class_id; 2236 2237 /* 2238 * capped by systemd (see: udev-builtin-net_id.c) 2239 * as it's the only known user honor it to avoid users 2240 * misconfigure QEMU and then wonder why acpi-index doesn't work 2241 */ 2242 if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) { 2243 error_setg(errp, "acpi-index should be less or equal to %u", 2244 ONBOARD_INDEX_MAX); 2245 return; 2246 } 2247 2248 /* 2249 * make sure that acpi-index is unique across all present PCI devices 2250 */ 2251 if (pci_dev->acpi_index) { 2252 GSequence *used_indexes = pci_acpi_index_list(); 2253 2254 if 
(g_sequence_lookup(used_indexes,
2255 GINT_TO_POINTER(pci_dev->acpi_index),
2256 g_cmp_uint32, NULL)) {
2257 error_setg(errp, "a PCI device with acpi-index = %" PRIu32
2258 " already exists", pci_dev->acpi_index);
2259 return;
2260 }
2261 g_sequence_insert_sorted(used_indexes,
2262 GINT_TO_POINTER(pci_dev->acpi_index),
2263 g_cmp_uint32, NULL);
2264 }
2265
2266 if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) {
2267 error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
2268 return;
2269 }
2270
2271 /* Initialize cap_present for pci_is_express() and pci_config_size().
2272 * Note that hybrid PCI devices are not flagged automatically and need to
2273 * manage QEMU_PCI_CAP_EXPRESS manually. */
2274 if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
2275 !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
2276 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
2277 }
2278
2279 if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
2280 pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
2281 }
2282
2283 pci_dev = do_pci_register_device(pci_dev,
2284 object_get_typename(OBJECT(qdev)),
2285 pci_dev->devfn, errp);
2286 if (pci_dev == NULL)
2287 return;
2288
2289 if (pc->realize) {
2290 pc->realize(pci_dev, &local_err);
2291 if (local_err) {
2292 error_propagate(errp, local_err);
2293 do_pci_unregister_device(pci_dev);
2294 return;
2295 }
2296 }
2297
2298 if (!pcie_sriov_register_device(pci_dev, errp)) {
2299 pci_qdev_unrealize(DEVICE(pci_dev));
2300 return;
2301 }
2302
2303 /*
2304 * A PCIe Downstream Port that does not have ARI Forwarding enabled must
2305 * associate only Device 0 with the device attached to the bus
2306 * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
2307 * sec 7.3.1).
2308 * With ARI, PCI_SLOT() can return a non-zero value as the traditional
2309 * 5-bit Device Number and 3-bit Function Number fields in its associated
2310 * Routing IDs, Requester IDs and Completer IDs are interpreted as a
2311 * single 8-bit Function Number. Hence, ignore ARI-capable devices.
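 * As an illustration (numbers chosen for this comment): a conventional
 * devfn of 0x0a encodes Device 1, Function 2 (PCI_SLOT() == 1,
 * PCI_FUNC() == 2), whereas an ARI-aware component reads the same eight
 * bits as Function 10 of Device 0, so a non-zero PCI_SLOT() is not an
 * error for ARI-capable devices.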
2312 */ 2313 if (pci_is_express(pci_dev) && 2314 !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) && 2315 pcie_has_upstream_port(pci_dev) && 2316 PCI_SLOT(pci_dev->devfn)) { 2317 warn_report("PCI: slot %d is not valid for %s," 2318 " parent device only allows plugging into slot 0.", 2319 PCI_SLOT(pci_dev->devfn), pci_dev->name); 2320 } 2321 2322 if (pci_dev->failover_pair_id) { 2323 if (!pci_bus_is_express(pci_get_bus(pci_dev))) { 2324 error_setg(errp, "failover primary device must be on " 2325 "PCIExpress bus"); 2326 pci_qdev_unrealize(DEVICE(pci_dev)); 2327 return; 2328 } 2329 class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE); 2330 if (class_id != PCI_CLASS_NETWORK_ETHERNET) { 2331 error_setg(errp, "failover primary device is not an " 2332 "Ethernet device"); 2333 pci_qdev_unrealize(DEVICE(pci_dev)); 2334 return; 2335 } 2336 if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) 2337 || (PCI_FUNC(pci_dev->devfn) != 0)) { 2338 error_setg(errp, "failover: primary device must be in its own " 2339 "PCI slot"); 2340 pci_qdev_unrealize(DEVICE(pci_dev)); 2341 return; 2342 } 2343 qdev->allow_unplug_during_migration = true; 2344 } 2345 2346 /* rom loading */ 2347 is_default_rom = false; 2348 if (pci_dev->romfile == NULL && pc->romfile != NULL) { 2349 pci_dev->romfile = g_strdup(pc->romfile); 2350 is_default_rom = true; 2351 } 2352 2353 pci_add_option_rom(pci_dev, is_default_rom, &local_err); 2354 if (local_err) { 2355 error_propagate(errp, local_err); 2356 pci_qdev_unrealize(DEVICE(pci_dev)); 2357 return; 2358 } 2359 2360 pci_set_power(pci_dev, true); 2361 2362 pci_dev->msi_trigger = pci_msi_trigger; 2363 } 2364 2365 static PCIDevice *pci_new_internal(int devfn, bool multifunction, 2366 const char *name) 2367 { 2368 DeviceState *dev; 2369 2370 dev = qdev_new(name); 2371 qdev_prop_set_int32(dev, "addr", devfn); 2372 qdev_prop_set_bit(dev, "multifunction", multifunction); 2373 return PCI_DEVICE(dev); 2374 } 2375 2376 PCIDevice *pci_new_multifunction(int devfn, const char *name) 2377 { 2378 return pci_new_internal(devfn, true, name); 2379 } 2380 2381 PCIDevice *pci_new(int devfn, const char *name) 2382 { 2383 return pci_new_internal(devfn, false, name); 2384 } 2385 2386 bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp) 2387 { 2388 return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); 2389 } 2390 2391 PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn, 2392 const char *name) 2393 { 2394 PCIDevice *dev = pci_new_multifunction(devfn, name); 2395 pci_realize_and_unref(dev, bus, &error_fatal); 2396 return dev; 2397 } 2398 2399 PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name) 2400 { 2401 PCIDevice *dev = pci_new(devfn, name); 2402 pci_realize_and_unref(dev, bus, &error_fatal); 2403 return dev; 2404 } 2405 2406 static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size) 2407 { 2408 int offset = PCI_CONFIG_HEADER_SIZE; 2409 int i; 2410 for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) { 2411 if (pdev->used[i]) 2412 offset = i + 1; 2413 else if (i - offset + 1 == size) 2414 return offset; 2415 } 2416 return 0; 2417 } 2418 2419 static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id, 2420 uint8_t *prev_p) 2421 { 2422 uint8_t next, prev; 2423 2424 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST)) 2425 return 0; 2426 2427 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2428 prev = next + PCI_CAP_LIST_NEXT) 2429 if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id) 2430 break; 2431 2432 
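    /*
     * Descriptive note: when the walk above stops, 'next' is either 0 (the
     * capability list was exhausted) or the config-space offset of the
     * capability matching 'cap_id', and 'prev' is the offset of the
     * next-pointer byte referencing it, which pci_del_capability() uses to
     * unlink the entry.
     */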
if (prev_p) 2433 *prev_p = prev; 2434 return next; 2435 } 2436 2437 static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset) 2438 { 2439 uint8_t next, prev, found = 0; 2440 2441 if (!(pdev->used[offset])) { 2442 return 0; 2443 } 2444 2445 assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST); 2446 2447 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); 2448 prev = next + PCI_CAP_LIST_NEXT) { 2449 if (next <= offset && next > found) { 2450 found = next; 2451 } 2452 } 2453 return found; 2454 } 2455 2456 /* Patch the PCI vendor and device ids in a PCI rom image if necessary. 2457 This is needed for an option rom which is used for more than one device. */ 2458 static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) 2459 { 2460 uint16_t vendor_id; 2461 uint16_t device_id; 2462 uint16_t rom_vendor_id; 2463 uint16_t rom_device_id; 2464 uint16_t rom_magic; 2465 uint16_t pcir_offset; 2466 uint8_t checksum; 2467 2468 /* Words in rom data are little endian (like in PCI configuration), 2469 so they can be read / written with pci_get_word / pci_set_word. */ 2470 2471 /* Only a valid rom will be patched. */ 2472 rom_magic = pci_get_word(ptr); 2473 if (rom_magic != 0xaa55) { 2474 trace_pci_bad_rom_magic(rom_magic, 0xaa55); 2475 return; 2476 } 2477 pcir_offset = pci_get_word(ptr + 0x18); 2478 if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) { 2479 trace_pci_bad_pcir_offset(pcir_offset); 2480 return; 2481 } 2482 2483 vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); 2484 device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); 2485 rom_vendor_id = pci_get_word(ptr + pcir_offset + 4); 2486 rom_device_id = pci_get_word(ptr + pcir_offset + 6); 2487 2488 trace_pci_rom_and_pci_ids(pdev->romfile, vendor_id, device_id, 2489 rom_vendor_id, rom_device_id); 2490 2491 checksum = ptr[6]; 2492 2493 if (vendor_id != rom_vendor_id) { 2494 /* Patch vendor id and checksum (at offset 6 for etherboot roms). */ 2495 checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8); 2496 checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8); 2497 trace_pci_rom_checksum_change(ptr[6], checksum); 2498 ptr[6] = checksum; 2499 pci_set_word(ptr + pcir_offset + 4, vendor_id); 2500 } 2501 2502 if (device_id != rom_device_id) { 2503 /* Patch device id and checksum (at offset 6 for etherboot roms). */ 2504 checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8); 2505 checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8); 2506 trace_pci_rom_checksum_change(ptr[6], checksum); 2507 ptr[6] = checksum; 2508 pci_set_word(ptr + pcir_offset + 6, device_id); 2509 } 2510 } 2511 2512 /* Add an option rom for the device */ 2513 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, 2514 Error **errp) 2515 { 2516 int64_t size = 0; 2517 g_autofree char *path = NULL; 2518 char name[32]; 2519 const VMStateDescription *vmsd; 2520 2521 /* 2522 * In case of incoming migration ROM will come with migration stream, no 2523 * reason to load the file. Neither we want to fail if local ROM file 2524 * mismatches with specified romsize. 2525 */ 2526 bool load_file = !runstate_check(RUN_STATE_INMIGRATE); 2527 2528 if (!pdev->romfile || !strlen(pdev->romfile)) { 2529 return; 2530 } 2531 2532 if (!pdev->rom_bar) { 2533 /* 2534 * Load rom via fw_cfg instead of creating a rom bar, 2535 * for 0.11 compatibility. 
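     *
     * Usage illustration (command line chosen for this comment): with
     * "-device e1000,rombar=0,romfile=pxe.rom" this branch is taken and
     * the ROM image is handed to the firmware through fw_cfg rather than
     * being exposed to the guest as a ROM BAR.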
2536 */ 2537 int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); 2538 2539 /* 2540 * Hot-plugged devices can't use the option ROM 2541 * if the rom bar is disabled. 2542 */ 2543 if (DEVICE(pdev)->hotplugged) { 2544 error_setg(errp, "Hot-plugged device without ROM bar" 2545 " can't have an option ROM"); 2546 return; 2547 } 2548 2549 if (class == 0x0300) { 2550 rom_add_vga(pdev->romfile); 2551 } else { 2552 rom_add_option(pdev->romfile, -1); 2553 } 2554 return; 2555 } 2556 2557 if (pci_is_vf(pdev)) { 2558 if (pdev->rom_bar > 0) { 2559 error_setg(errp, "ROM BAR cannot be enabled for SR-IOV VF"); 2560 } 2561 2562 return; 2563 } 2564 2565 if (load_file || pdev->romsize == UINT32_MAX) { 2566 path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile); 2567 if (path == NULL) { 2568 path = g_strdup(pdev->romfile); 2569 } 2570 2571 size = get_image_size(path, NULL); 2572 if (size < 0) { 2573 error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile); 2574 return; 2575 } else if (size == 0) { 2576 error_setg(errp, "romfile \"%s\" is empty", pdev->romfile); 2577 return; 2578 } else if (size > 2 * GiB) { 2579 error_setg(errp, 2580 "romfile \"%s\" too large (size cannot exceed 2 GiB)", 2581 pdev->romfile); 2582 return; 2583 } 2584 if (pdev->romsize != UINT_MAX) { 2585 if (size > pdev->romsize) { 2586 error_setg(errp, "romfile \"%s\" (%u bytes) " 2587 "is too large for ROM size %u", 2588 pdev->romfile, (uint32_t)size, pdev->romsize); 2589 return; 2590 } 2591 } else { 2592 pdev->romsize = pow2ceil(size); 2593 } 2594 } 2595 2596 vmsd = qdev_get_vmsd(DEVICE(pdev)); 2597 snprintf(name, sizeof(name), "%s.rom", 2598 vmsd ? vmsd->name : object_get_typename(OBJECT(pdev))); 2599 2600 pdev->has_rom = true; 2601 memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, 2602 &error_fatal); 2603 2604 if (load_file) { 2605 void *ptr = memory_region_get_ram_ptr(&pdev->rom); 2606 2607 if (load_image_size(path, ptr, size) < 0) { 2608 error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile); 2609 return; 2610 } 2611 2612 if (is_default_rom) { 2613 /* Only the default rom images will be patched (if needed). */ 2614 pci_patch_ids(pdev, ptr, size); 2615 } 2616 } 2617 2618 pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom); 2619 } 2620 2621 static void pci_del_option_rom(PCIDevice *pdev) 2622 { 2623 if (!pdev->has_rom) 2624 return; 2625 2626 vmstate_unregister_ram(&pdev->rom, &pdev->qdev); 2627 pdev->has_rom = false; 2628 } 2629 2630 /* 2631 * On success, pci_add_capability() returns a positive value 2632 * that the offset of the pci capability. 2633 * On failure, it sets an error and returns a negative error 2634 * code. 2635 */ 2636 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id, 2637 uint8_t offset, uint8_t size, 2638 Error **errp) 2639 { 2640 uint8_t *config; 2641 int i, overlapping_cap; 2642 2643 if (!offset) { 2644 offset = pci_find_space(pdev, size); 2645 /* out of PCI config space is programming error */ 2646 assert(offset); 2647 } else { 2648 /* Verify that capabilities don't overlap. Note: device assignment 2649 * depends on this check to verify that the device is not broken. 2650 * Should never trigger for emulated devices, but it's helpful 2651 * for debugging these. 
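     *
     * For example (offsets invented for this note): requesting an 8-byte
     * capability at a caller-chosen offset 0x60 while another capability
     * is already linked at 0x64 makes pci_find_capability_at_offset()
     * report that capability for one of the scanned bytes, so the
     * function returns -EINVAL instead of silently overlapping config
     * space.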
*/ 2652 for (i = offset; i < offset + size; i++) { 2653 overlapping_cap = pci_find_capability_at_offset(pdev, i); 2654 if (overlapping_cap) { 2655 error_setg(errp, "%s:%02x:%02x.%x " 2656 "Attempt to add PCI capability %x at offset " 2657 "%x overlaps existing capability %x at offset %x", 2658 pci_root_bus_path(pdev), pci_dev_bus_num(pdev), 2659 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 2660 cap_id, offset, overlapping_cap, i); 2661 return -EINVAL; 2662 } 2663 } 2664 } 2665 2666 config = pdev->config + offset; 2667 config[PCI_CAP_LIST_ID] = cap_id; 2668 config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST]; 2669 pdev->config[PCI_CAPABILITY_LIST] = offset; 2670 pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST; 2671 memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4)); 2672 /* Make capability read-only by default */ 2673 memset(pdev->wmask + offset, 0, size); 2674 /* Check capability by default */ 2675 memset(pdev->cmask + offset, 0xFF, size); 2676 return offset; 2677 } 2678 2679 /* Unlink capability from the pci config space. */ 2680 void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size) 2681 { 2682 uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev); 2683 if (!offset) 2684 return; 2685 pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT]; 2686 /* Make capability writable again */ 2687 memset(pdev->wmask + offset, 0xff, size); 2688 memset(pdev->w1cmask + offset, 0, size); 2689 /* Clear cmask as device-specific registers can't be checked */ 2690 memset(pdev->cmask + offset, 0, size); 2691 memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4)); 2692 2693 if (!pdev->config[PCI_CAPABILITY_LIST]) 2694 pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST; 2695 } 2696 2697 uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id) 2698 { 2699 return pci_find_capability_list(pdev, cap_id, NULL); 2700 } 2701 2702 static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len) 2703 { 2704 PCIDevice *d = (PCIDevice *)dev; 2705 const char *name = NULL; 2706 const pci_class_desc *desc = pci_class_descriptions; 2707 int class = pci_get_word(d->config + PCI_CLASS_DEVICE); 2708 2709 while (desc->desc && 2710 (class & ~desc->fw_ign_bits) != 2711 (desc->class & ~desc->fw_ign_bits)) { 2712 desc++; 2713 } 2714 2715 if (desc->desc) { 2716 name = desc->fw_name; 2717 } 2718 2719 if (name) { 2720 pstrcpy(buf, len, name); 2721 } else { 2722 snprintf(buf, len, "pci%04x,%04x", 2723 pci_get_word(d->config + PCI_VENDOR_ID), 2724 pci_get_word(d->config + PCI_DEVICE_ID)); 2725 } 2726 2727 return buf; 2728 } 2729 2730 static char *pcibus_get_fw_dev_path(DeviceState *dev) 2731 { 2732 PCIDevice *d = (PCIDevice *)dev; 2733 char name[33]; 2734 int has_func = !!PCI_FUNC(d->devfn); 2735 2736 return g_strdup_printf("%s@%x%s%.*x", 2737 pci_dev_fw_name(dev, name, sizeof(name)), 2738 PCI_SLOT(d->devfn), 2739 has_func ? "," : "", 2740 has_func, 2741 PCI_FUNC(d->devfn)); 2742 } 2743 2744 static char *pcibus_get_dev_path(DeviceState *dev) 2745 { 2746 PCIDevice *d = container_of(dev, PCIDevice, qdev); 2747 PCIDevice *t; 2748 int slot_depth; 2749 /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function. 2750 * 00 is added here to make this format compatible with 2751 * domain:Bus:Slot.Func for systems without nested PCI bridges. 2752 * Slot.Function list specifies the slot and function numbers for all 2753 * devices on the path from root to the specific device. 
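     *
     * Example (topology invented for this comment): a function at slot
     * 0x05, function 0 behind a PCI bridge in slot 0x1e on a root bus
     * whose pci_root_bus_path() is "0000:00" yields "0000:00:1e.0:05.0".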
*/ 2754 const char *root_bus_path; 2755 int root_bus_len; 2756 char slot[] = ":SS.F"; 2757 int slot_len = sizeof slot - 1 /* For '\0' */; 2758 int path_len; 2759 char *path, *p; 2760 int s; 2761 2762 root_bus_path = pci_root_bus_path(d); 2763 root_bus_len = strlen(root_bus_path); 2764 2765 /* Calculate # of slots on path between device and root. */; 2766 slot_depth = 0; 2767 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2768 ++slot_depth; 2769 } 2770 2771 path_len = root_bus_len + slot_len * slot_depth; 2772 2773 /* Allocate memory, fill in the terminating null byte. */ 2774 path = g_malloc(path_len + 1 /* For '\0' */); 2775 path[path_len] = '\0'; 2776 2777 memcpy(path, root_bus_path, root_bus_len); 2778 2779 /* Fill in slot numbers. We walk up from device to root, so need to print 2780 * them in the reverse order, last to first. */ 2781 p = path + path_len; 2782 for (t = d; t; t = pci_get_bus(t)->parent_dev) { 2783 p -= slot_len; 2784 s = snprintf(slot, sizeof slot, ":%02x.%x", 2785 PCI_SLOT(t->devfn), PCI_FUNC(t->devfn)); 2786 assert(s == slot_len); 2787 memcpy(p, slot, slot_len); 2788 } 2789 2790 return path; 2791 } 2792 2793 static int pci_qdev_find_recursive(PCIBus *bus, 2794 const char *id, PCIDevice **pdev) 2795 { 2796 DeviceState *qdev = qdev_find_recursive(&bus->qbus, id); 2797 if (!qdev) { 2798 return -ENODEV; 2799 } 2800 2801 /* roughly check if given qdev is pci device */ 2802 if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) { 2803 *pdev = PCI_DEVICE(qdev); 2804 return 0; 2805 } 2806 return -EINVAL; 2807 } 2808 2809 int pci_qdev_find_device(const char *id, PCIDevice **pdev) 2810 { 2811 PCIHostState *host_bridge; 2812 int rc = -ENODEV; 2813 2814 QLIST_FOREACH(host_bridge, &pci_host_bridges, next) { 2815 int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev); 2816 if (!tmp) { 2817 rc = 0; 2818 break; 2819 } 2820 if (tmp != -ENODEV) { 2821 rc = tmp; 2822 } 2823 } 2824 2825 return rc; 2826 } 2827 2828 MemoryRegion *pci_address_space(PCIDevice *dev) 2829 { 2830 return pci_get_bus(dev)->address_space_mem; 2831 } 2832 2833 MemoryRegion *pci_address_space_io(PCIDevice *dev) 2834 { 2835 return pci_get_bus(dev)->address_space_io; 2836 } 2837 2838 static void pci_device_class_init(ObjectClass *klass, const void *data) 2839 { 2840 DeviceClass *k = DEVICE_CLASS(klass); 2841 2842 k->realize = pci_qdev_realize; 2843 k->unrealize = pci_qdev_unrealize; 2844 k->bus_type = TYPE_PCI_BUS; 2845 device_class_set_props(k, pci_props); 2846 object_class_property_set_description( 2847 klass, "x-max-bounce-buffer-size", 2848 "Maximum buffer size allocated for bounce buffers used for mapped " 2849 "access to indirect DMA memory"); 2850 } 2851 2852 static void pci_device_class_base_init(ObjectClass *klass, const void *data) 2853 { 2854 if (!object_class_is_abstract(klass)) { 2855 ObjectClass *conventional = 2856 object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE); 2857 ObjectClass *pcie = 2858 object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE); 2859 ObjectClass *cxl = 2860 object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE); 2861 assert(conventional || pcie || cxl); 2862 } 2863 } 2864 2865 /* 2866 * Get IOMMU root bus, aliased bus and devfn of a PCI device 2867 * 2868 * IOMMU root bus is needed by all call sites to call into iommu_ops. 2869 * For call sites which don't need aliased BDF, passing NULL to 2870 * aliased_[bus|devfn] is allowed. 2871 * 2872 * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device. 
2873 *
2874 * @aliased_bus: return aliased #PCIBus of the PCI device, optional.
2875 *
2876 * @aliased_devfn: return aliased devfn of the PCI device, optional.
2877 */
2878 static void pci_device_get_iommu_bus_devfn(PCIDevice *dev,
2879 PCIBus **piommu_bus,
2880 PCIBus **aliased_bus,
2881 int *aliased_devfn)
2882 {
2883 PCIBus *bus = pci_get_bus(dev);
2884 PCIBus *iommu_bus = bus;
2885 int devfn = dev->devfn;
2886
2887 while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
2888 PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);
2889
2890 /*
2891 * The requester ID of the provided device may be aliased, as seen from
2892 * the IOMMU, due to topology limitations. The IOMMU relies on a
2893 * requester ID to provide a unique AddressSpace for devices, but
2894 * conventional PCI buses pre-date such concepts. Instead, the PCIe-
2895 * to-PCI bridge creates and accepts transactions on behalf of down-
2896 * stream devices. When doing so, all downstream devices are masked
2897 * (aliased) behind a single requester ID. The requester ID used
2898 * depends on the format of the bridge devices. Proper PCIe-to-PCI
2899 * bridges, with a PCIe capability indicating such, follow the
2900 * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
2901 * where the bridge uses the secondary bus as the bridge portion of the
2902 * requester ID and devfn of 00.0. For other bridges, typically those
2903 * found on the root complex such as the dmi-to-pci-bridge, we follow
2904 * the convention of typical bare-metal hardware, which uses the
2905 * requester ID of the bridge itself. There are device-specific
2906 * exceptions to these rules, but these are the defaults that the
2907 * Linux kernel uses when determining DMA aliases itself and are believed
2908 * to be true for the bare-metal equivalents of the devices emulated
2909 * in QEMU.
2910 */
2911 if (!pci_bus_is_express(iommu_bus)) {
2912 PCIDevice *parent = iommu_bus->parent_dev;
2913
2914 if (pci_is_express(parent) &&
2915 pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
2916 devfn = PCI_DEVFN(0, 0);
2917 bus = iommu_bus;
2918 } else {
2919 devfn = parent->devfn;
2920 bus = parent_bus;
2921 }
2922 }
2923
2924 /*
2925 * When multiple PCI Express Root Buses are defined using pxb-pcie,
2926 * the IOMMU configuration may be specific to each root bus. However,
2927 * pxb-pcie acts as a special root complex whose parent is effectively
2928 * the default root complex (pcie.0). Ensure that we retrieve the
2929 * correct IOMMU ops (if any) in such cases.
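     * (Illustration: with e.g. "-device pxb-pcie,id=pcie.1,bus_nr=2" the
     * extra root bus is parented to pcie.0, so the iommu_per_bus check
     * below stops this walk instead of letting devices behind pcie.1
     * silently inherit an IOMMU that was registered as specific to
     * pcie.0.)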
2930 */ 2931 if (pci_bus_is_express(iommu_bus) && pci_bus_is_root(iommu_bus)) { 2932 if (parent_bus->iommu_per_bus) { 2933 break; 2934 } 2935 } 2936 2937 iommu_bus = parent_bus; 2938 } 2939 2940 assert(0 <= devfn && devfn < PCI_DEVFN_MAX); 2941 assert(iommu_bus); 2942 2943 if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) { 2944 iommu_bus = NULL; 2945 } 2946 2947 *piommu_bus = iommu_bus; 2948 2949 if (aliased_bus) { 2950 *aliased_bus = bus; 2951 } 2952 2953 if (aliased_devfn) { 2954 *aliased_devfn = devfn; 2955 } 2956 } 2957 2958 AddressSpace *pci_device_iommu_address_space(PCIDevice *dev) 2959 { 2960 PCIBus *bus; 2961 PCIBus *iommu_bus; 2962 int devfn; 2963 2964 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 2965 if (iommu_bus) { 2966 return iommu_bus->iommu_ops->get_address_space(bus, 2967 iommu_bus->iommu_opaque, devfn); 2968 } 2969 return &address_space_memory; 2970 } 2971 2972 int pci_iommu_init_iotlb_notifier(PCIDevice *dev, IOMMUNotifier *n, 2973 IOMMUNotify fn, void *opaque) 2974 { 2975 PCIBus *bus; 2976 PCIBus *iommu_bus; 2977 int devfn; 2978 2979 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 2980 if (iommu_bus && iommu_bus->iommu_ops->init_iotlb_notifier) { 2981 iommu_bus->iommu_ops->init_iotlb_notifier(bus, iommu_bus->iommu_opaque, 2982 devfn, n, fn, opaque); 2983 return 0; 2984 } 2985 2986 return -ENODEV; 2987 } 2988 2989 bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod, 2990 Error **errp) 2991 { 2992 PCIBus *iommu_bus, *aliased_bus; 2993 int aliased_devfn; 2994 2995 /* set_iommu_device requires device's direct BDF instead of aliased BDF */ 2996 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, 2997 &aliased_bus, &aliased_devfn); 2998 if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) { 2999 hiod->aliased_bus = aliased_bus; 3000 hiod->aliased_devfn = aliased_devfn; 3001 return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev), 3002 iommu_bus->iommu_opaque, 3003 dev->devfn, hiod, errp); 3004 } 3005 return true; 3006 } 3007 3008 void pci_device_unset_iommu_device(PCIDevice *dev) 3009 { 3010 PCIBus *iommu_bus; 3011 3012 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL); 3013 if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) { 3014 return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev), 3015 iommu_bus->iommu_opaque, 3016 dev->devfn); 3017 } 3018 } 3019 3020 int pci_pri_request_page(PCIDevice *dev, uint32_t pasid, bool priv_req, 3021 bool exec_req, hwaddr addr, bool lpig, 3022 uint16_t prgi, bool is_read, bool is_write) 3023 { 3024 PCIBus *bus; 3025 PCIBus *iommu_bus; 3026 int devfn; 3027 3028 if (!dev->is_master || 3029 ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) { 3030 return -EPERM; 3031 } 3032 3033 if (!pcie_pri_enabled(dev)) { 3034 return -EPERM; 3035 } 3036 3037 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 3038 if (iommu_bus && iommu_bus->iommu_ops->pri_request_page) { 3039 return iommu_bus->iommu_ops->pri_request_page(bus, 3040 iommu_bus->iommu_opaque, 3041 devfn, pasid, priv_req, 3042 exec_req, addr, lpig, prgi, 3043 is_read, is_write); 3044 } 3045 3046 return -ENODEV; 3047 } 3048 3049 int pci_pri_register_notifier(PCIDevice *dev, uint32_t pasid, 3050 IOMMUPRINotifier *notifier) 3051 { 3052 PCIBus *bus; 3053 PCIBus *iommu_bus; 3054 int devfn; 3055 3056 if (!dev->is_master || 3057 ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) { 3058 return -EPERM; 3059 } 3060 3061 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 3062 if 
(iommu_bus && iommu_bus->iommu_ops->pri_register_notifier) { 3063 iommu_bus->iommu_ops->pri_register_notifier(bus, 3064 iommu_bus->iommu_opaque, 3065 devfn, pasid, notifier); 3066 return 0; 3067 } 3068 3069 return -ENODEV; 3070 } 3071 3072 void pci_pri_unregister_notifier(PCIDevice *dev, uint32_t pasid) 3073 { 3074 PCIBus *bus; 3075 PCIBus *iommu_bus; 3076 int devfn; 3077 3078 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 3079 if (iommu_bus && iommu_bus->iommu_ops->pri_unregister_notifier) { 3080 iommu_bus->iommu_ops->pri_unregister_notifier(bus, 3081 iommu_bus->iommu_opaque, 3082 devfn, pasid); 3083 } 3084 } 3085 3086 ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid, 3087 bool priv_req, bool exec_req, 3088 hwaddr addr, size_t length, 3089 bool no_write, IOMMUTLBEntry *result, 3090 size_t result_length, 3091 uint32_t *err_count) 3092 { 3093 PCIBus *bus; 3094 PCIBus *iommu_bus; 3095 int devfn; 3096 3097 if (!dev->is_master || 3098 ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) { 3099 return -EPERM; 3100 } 3101 3102 if (result_length == 0) { 3103 return -ENOSPC; 3104 } 3105 3106 if (!pcie_ats_enabled(dev)) { 3107 return -EPERM; 3108 } 3109 3110 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 3111 if (iommu_bus && iommu_bus->iommu_ops->ats_request_translation) { 3112 return iommu_bus->iommu_ops->ats_request_translation(bus, 3113 iommu_bus->iommu_opaque, 3114 devfn, pasid, priv_req, 3115 exec_req, addr, length, 3116 no_write, result, 3117 result_length, err_count); 3118 } 3119 3120 return -ENODEV; 3121 } 3122 3123 int pci_iommu_register_iotlb_notifier(PCIDevice *dev, uint32_t pasid, 3124 IOMMUNotifier *n) 3125 { 3126 PCIBus *bus; 3127 PCIBus *iommu_bus; 3128 int devfn; 3129 3130 if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) { 3131 return -EPERM; 3132 } 3133 3134 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 3135 if (iommu_bus && iommu_bus->iommu_ops->register_iotlb_notifier) { 3136 iommu_bus->iommu_ops->register_iotlb_notifier(bus, 3137 iommu_bus->iommu_opaque, devfn, 3138 pasid, n); 3139 return 0; 3140 } 3141 3142 return -ENODEV; 3143 } 3144 3145 int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid, 3146 IOMMUNotifier *n) 3147 { 3148 PCIBus *bus; 3149 PCIBus *iommu_bus; 3150 int devfn; 3151 3152 if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) { 3153 return -EPERM; 3154 } 3155 3156 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); 3157 if (iommu_bus && iommu_bus->iommu_ops->unregister_iotlb_notifier) { 3158 iommu_bus->iommu_ops->unregister_iotlb_notifier(bus, 3159 iommu_bus->iommu_opaque, 3160 devfn, pasid, n); 3161 return 0; 3162 } 3163 3164 return -ENODEV; 3165 } 3166 3167 int pci_iommu_get_iotlb_info(PCIDevice *dev, uint8_t *addr_width, 3168 uint32_t *min_page_size) 3169 { 3170 PCIBus *iommu_bus; 3171 3172 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL); 3173 if (iommu_bus && iommu_bus->iommu_ops->get_iotlb_info) { 3174 iommu_bus->iommu_ops->get_iotlb_info(iommu_bus->iommu_opaque, 3175 addr_width, min_page_size); 3176 return 0; 3177 } 3178 3179 return -ENODEV; 3180 } 3181 3182 void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque) 3183 { 3184 /* 3185 * If called, pci_setup_iommu() should provide a minimum set of 3186 * useful callbacks for the bus. 
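 *
 * A minimal caller sketch (names invented for this comment, not an
 * existing QEMU IOMMU implementation):
 *
 *     static const PCIIOMMUOps my_iommu_ops = {
 *         .get_address_space = my_get_address_space,
 *     };
 *
 *     pci_setup_iommu(bus, &my_iommu_ops, my_iommu_state);
 *
 * Only get_address_space is asserted below; every other callback in
 * PCIIOMMUOps is optional and call sites check for its presence first.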
3187 */
3188 assert(ops);
3189 assert(ops->get_address_space);
3190
3191 bus->iommu_ops = ops;
3192 bus->iommu_opaque = opaque;
3193 }
3194
3195 /*
3196 * Similar to pci_setup_iommu(), but sets iommu_per_bus to true,
3197 * indicating that the IOMMU is specific to this bus. This is used by
3198 * IOMMU implementations that are tied to a specific PCIe root complex.
3199 *
3200 * In QEMU, pxb-pcie behaves as a special root complex whose parent is
3201 * effectively the default root complex (pcie.0). The iommu_per_bus
3202 * is checked in pci_device_get_iommu_bus_devfn() to ensure the correct
3203 * IOMMU ops are returned, avoiding the use of the parent's IOMMU when
3204 * it's not appropriate.
3205 */
3206 void pci_setup_iommu_per_bus(PCIBus *bus, const PCIIOMMUOps *ops,
3207 void *opaque)
3208 {
3209 pci_setup_iommu(bus, ops, opaque);
3210 bus->iommu_per_bus = true;
3211 }
3212
3213 static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
3214 {
3215 Range *range = opaque;
3216 uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
3217 int i;
3218
3219 if (!(cmd & PCI_COMMAND_MEMORY)) {
3220 return;
3221 }
3222
3223 if (IS_PCI_BRIDGE(dev)) {
3224 pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
3225 pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
3226
3227 base = MAX(base, 0x1ULL << 32);
3228
3229 if (limit >= base) {
3230 Range pref_range;
3231 range_set_bounds(&pref_range, base, limit);
3232 range_extend(range, &pref_range);
3233 }
3234 }
3235 for (i = 0; i < PCI_NUM_REGIONS; ++i) {
3236 PCIIORegion *r = &dev->io_regions[i];
3237 pcibus_t lob, upb;
3238 Range region_range;
3239
3240 if (!r->size ||
3241 (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
3242 !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
3243 continue;
3244 }
3245
3246 lob = pci_bar_address(dev, i, r->type, r->size);
3247 upb = lob + r->size - 1;
3248 if (lob == PCI_BAR_UNMAPPED) {
3249 continue;
3250 }
3251
3252 lob = MAX(lob, 0x1ULL << 32);
3253
3254 if (upb >= lob) {
3255 range_set_bounds(&region_range, lob, upb);
3256 range_extend(range, &region_range);
3257 }
3258 }
3259 }
3260
3261 void pci_bus_get_w64_range(PCIBus *bus, Range *range)
3262 {
3263 range_make_empty(range);
3264 pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
3265 }
3266
3267 static bool pcie_has_upstream_port(PCIDevice *dev)
3268 {
3269 PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));
3270
3271 /* Device associated with an upstream port.
3272 * As there are several types of these, it's easier to check the
3273 * parent device: upstream ports are always connected to
3274 * root or downstream ports.
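 * For instance: a device plugged into a PCIe root port or a switch
 * downstream port makes this return true, while a device sitting
 * directly on the root bus has no parent bridge at all
 * (pci_bridge_get_device() yields NULL there) and returns false.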
3275 */ 3276 return parent_dev && 3277 pci_is_express(parent_dev) && 3278 parent_dev->exp.exp_cap && 3279 (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT || 3280 pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM); 3281 } 3282 3283 PCIDevice *pci_get_function_0(PCIDevice *pci_dev) 3284 { 3285 PCIBus *bus = pci_get_bus(pci_dev); 3286 3287 if(pcie_has_upstream_port(pci_dev)) { 3288 /* With an upstream PCIe port, we only support 1 device at slot 0 */ 3289 return bus->devices[0]; 3290 } else { 3291 /* Other bus types might support multiple devices at slots 0-31 */ 3292 return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)]; 3293 } 3294 } 3295 3296 MSIMessage pci_get_msi_message(PCIDevice *dev, int vector) 3297 { 3298 MSIMessage msg; 3299 if (msix_enabled(dev)) { 3300 msg = msix_get_message(dev, vector); 3301 } else if (msi_enabled(dev)) { 3302 msg = msi_get_message(dev, vector); 3303 } else { 3304 /* Should never happen */ 3305 error_report("%s: unknown interrupt type", __func__); 3306 abort(); 3307 } 3308 return msg; 3309 } 3310 3311 void pci_set_power(PCIDevice *d, bool state) 3312 { 3313 /* 3314 * Don't change the enabled state of VFs when powering on/off the device. 3315 * 3316 * When powering on, VFs must not be enabled immediately but they must 3317 * wait until the guest configures SR-IOV. 3318 * When powering off, their corresponding PFs will be reset and disable 3319 * VFs. 3320 */ 3321 if (!pci_is_vf(d)) { 3322 pci_set_enabled(d, state); 3323 } 3324 } 3325 3326 void pci_set_enabled(PCIDevice *d, bool state) 3327 { 3328 if (d->enabled == state) { 3329 return; 3330 } 3331 3332 d->enabled = state; 3333 pci_update_mappings(d); 3334 pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND) 3335 & PCI_COMMAND_MASTER) && d->enabled); 3336 if (qdev_is_realized(&d->qdev)) { 3337 pci_device_reset(d); 3338 } 3339 } 3340 3341 static const TypeInfo pci_device_type_info = { 3342 .name = TYPE_PCI_DEVICE, 3343 .parent = TYPE_DEVICE, 3344 .instance_size = sizeof(PCIDevice), 3345 .abstract = true, 3346 .class_size = sizeof(PCIDeviceClass), 3347 .class_init = pci_device_class_init, 3348 .class_base_init = pci_device_class_base_init, 3349 }; 3350 3351 static void pci_register_types(void) 3352 { 3353 type_register_static(&pci_bus_info); 3354 type_register_static(&pcie_bus_info); 3355 type_register_static(&cxl_bus_info); 3356 type_register_static(&conventional_pci_interface_info); 3357 type_register_static(&cxl_interface_info); 3358 type_register_static(&pcie_interface_info); 3359 type_register_static(&pci_device_type_info); 3360 } 3361 3362 type_init(pci_register_types) 3363