1 /*
2 * QEMU PCI bus manager
3 *
4 * Copyright (c) 2004 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "qemu/datadir.h"
27 #include "qemu/units.h"
28 #include "hw/irq.h"
29 #include "hw/pci/pci.h"
30 #include "hw/pci/pci_bridge.h"
31 #include "hw/pci/pci_bus.h"
32 #include "hw/pci/pci_host.h"
33 #include "hw/qdev-properties.h"
34 #include "hw/qdev-properties-system.h"
35 #include "migration/qemu-file-types.h"
36 #include "migration/vmstate.h"
37 #include "net/net.h"
38 #include "sysemu/numa.h"
39 #include "sysemu/runstate.h"
40 #include "sysemu/sysemu.h"
41 #include "hw/loader.h"
42 #include "qemu/error-report.h"
43 #include "qemu/range.h"
44 #include "trace.h"
45 #include "hw/pci/msi.h"
46 #include "hw/pci/msix.h"
47 #include "hw/hotplug.h"
48 #include "hw/boards.h"
49 #include "qapi/error.h"
50 #include "qemu/cutils.h"
51 #include "pci-internal.h"
52
53 #include "hw/xen/xen.h"
54 #include "hw/i386/kvm/xen_evtchn.h"
55
56 //#define DEBUG_PCI
57 #ifdef DEBUG_PCI
58 # define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__)
59 #else
60 # define PCI_DPRINTF(format, ...) do { } while (0)
61 #endif
62
63 bool pci_available = true;
64
65 static char *pcibus_get_dev_path(DeviceState *dev);
66 static char *pcibus_get_fw_dev_path(DeviceState *dev);
67 static void pcibus_reset_hold(Object *obj, ResetType type);
68 static bool pcie_has_upstream_port(PCIDevice *dev);
69
70 static Property pci_props[] = {
71 DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
72 DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
73 DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX),
74 DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
75 DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
76 QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
77 DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
78 QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
79 DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
80 QEMU_PCIE_EXTCAP_INIT_BITNR, true),
81 DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
82 failover_pair_id),
83 DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
84 DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
85 QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
86 DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
87 QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
88 DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
89 max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
90 DEFINE_PROP_END_OF_LIST()
91 };
92
93 static const VMStateDescription vmstate_pcibus = {
94 .name = "PCIBUS",
95 .version_id = 1,
96 .minimum_version_id = 1,
97 .fields = (const VMStateField[]) {
98 VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
99 VMSTATE_VARRAY_INT32(irq_count, PCIBus,
100 nirq, 0, vmstate_info_int32,
101 int32_t),
102 VMSTATE_END_OF_LIST()
103 }
104 };
105
static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
107 {
108 return a - b;
109 }
110
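/*
 * Lazily-created sequence tracking the acpi-index values currently in
 * use; entries are compared with g_cmp_uint32() above and removed again
 * in pci_qdev_unrealize() so an index can be reused by another device.
 */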
static GSequence *pci_acpi_index_list(void)
112 {
113 static GSequence *used_acpi_index_list;
114
115 if (!used_acpi_index_list) {
116 used_acpi_index_list = g_sequence_new(NULL);
117 }
118 return used_acpi_index_list;
119 }
120
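/*
 * Create the "bus master" alias of the device's DMA address space.
 * The alias starts out disabled; it is only enabled once the guest sets
 * PCI_COMMAND_MASTER (see pci_default_write_config()).
 */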
static void pci_init_bus_master(PCIDevice *pci_dev)
122 {
123 AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);
124
125 memory_region_init_alias(&pci_dev->bus_master_enable_region,
126 OBJECT(pci_dev), "bus master",
127 dma_as->root, 0, memory_region_size(dma_as->root));
128 memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
129 memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
130 &pci_dev->bus_master_enable_region);
131 }
132
static void pcibus_machine_done(Notifier *notifier, void *data)
134 {
135 PCIBus *bus = container_of(notifier, PCIBus, machine_done);
136 int i;
137
138 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
139 if (bus->devices[i]) {
140 pci_init_bus_master(bus->devices[i]);
141 }
142 }
143 }
144
static void pci_bus_realize(BusState *qbus, Error **errp)
146 {
147 PCIBus *bus = PCI_BUS(qbus);
148
149 bus->machine_done.notify = pcibus_machine_done;
150 qemu_add_machine_init_done_notifier(&bus->machine_done);
151
152 vmstate_register_any(NULL, &vmstate_pcibus, bus);
153 }
154
static void pcie_bus_realize(BusState *qbus, Error **errp)
156 {
157 PCIBus *bus = PCI_BUS(qbus);
158 Error *local_err = NULL;
159
160 pci_bus_realize(qbus, &local_err);
161 if (local_err) {
162 error_propagate(errp, local_err);
163 return;
164 }
165
166 /*
167 * A PCI-E bus can support extended config space if it's the root
168 * bus, or if the bus/bridge above it does as well
169 */
170 if (pci_bus_is_root(bus)) {
171 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
172 } else {
173 PCIBus *parent_bus = pci_get_bus(bus->parent_dev);
174
175 if (pci_bus_allows_extended_config_space(parent_bus)) {
176 bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
177 }
178 }
179 }
180
static void pci_bus_unrealize(BusState *qbus)
182 {
183 PCIBus *bus = PCI_BUS(qbus);
184
185 qemu_remove_machine_init_done_notifier(&bus->machine_done);
186
187 vmstate_unregister(NULL, &vmstate_pcibus, bus);
188 }
189
static int pcibus_num(PCIBus *bus)
191 {
192 if (pci_bus_is_root(bus)) {
193 return 0; /* pci host bridge */
194 }
195 return bus->parent_dev->config[PCI_SECONDARY_BUS];
196 }
197
static uint16_t pcibus_numa_node(PCIBus *bus)
199 {
200 return NUMA_NODE_UNASSIGNED;
201 }
202
static void pci_bus_class_init(ObjectClass *klass, void *data)
204 {
205 BusClass *k = BUS_CLASS(klass);
206 PCIBusClass *pbc = PCI_BUS_CLASS(klass);
207 ResettableClass *rc = RESETTABLE_CLASS(klass);
208
209 k->print_dev = pcibus_dev_print;
210 k->get_dev_path = pcibus_get_dev_path;
211 k->get_fw_dev_path = pcibus_get_fw_dev_path;
212 k->realize = pci_bus_realize;
213 k->unrealize = pci_bus_unrealize;
214
215 rc->phases.hold = pcibus_reset_hold;
216
217 pbc->bus_num = pcibus_num;
218 pbc->numa_node = pcibus_numa_node;
219 }
220
221 static const TypeInfo pci_bus_info = {
222 .name = TYPE_PCI_BUS,
223 .parent = TYPE_BUS,
224 .instance_size = sizeof(PCIBus),
225 .class_size = sizeof(PCIBusClass),
226 .class_init = pci_bus_class_init,
227 };
228
229 static const TypeInfo cxl_interface_info = {
230 .name = INTERFACE_CXL_DEVICE,
231 .parent = TYPE_INTERFACE,
232 };
233
234 static const TypeInfo pcie_interface_info = {
235 .name = INTERFACE_PCIE_DEVICE,
236 .parent = TYPE_INTERFACE,
237 };
238
239 static const TypeInfo conventional_pci_interface_info = {
240 .name = INTERFACE_CONVENTIONAL_PCI_DEVICE,
241 .parent = TYPE_INTERFACE,
242 };
243
static void pcie_bus_class_init(ObjectClass *klass, void *data)
245 {
246 BusClass *k = BUS_CLASS(klass);
247
248 k->realize = pcie_bus_realize;
249 }
250
251 static const TypeInfo pcie_bus_info = {
252 .name = TYPE_PCIE_BUS,
253 .parent = TYPE_PCI_BUS,
254 .class_init = pcie_bus_class_init,
255 };
256
257 static const TypeInfo cxl_bus_info = {
258 .name = TYPE_CXL_BUS,
259 .parent = TYPE_PCIE_BUS,
260 .class_init = pcie_bus_class_init,
261 };
262
263 static void pci_update_mappings(PCIDevice *d);
264 static void pci_irq_handler(void *opaque, int irq_num, int level);
265 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
266 static void pci_del_option_rom(PCIDevice *pdev);
267
268 static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
269 static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;
270
271 PCIHostStateList pci_host_bridges;
272
int pci_bar(PCIDevice *d, int reg)
274 {
275 uint8_t type;
276
277 /* PCIe virtual functions do not have their own BARs */
278 assert(!pci_is_vf(d));
279
280 if (reg != PCI_ROM_SLOT)
281 return PCI_BASE_ADDRESS_0 + reg * 4;
282
283 type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
284 return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
285 }
286
static inline int pci_irq_state(PCIDevice *d, int irq_num)
288 {
289 return (d->irq_state >> irq_num) & 0x1;
290 }
291
static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
293 {
294 d->irq_state &= ~(0x1 << irq_num);
295 d->irq_state |= level << irq_num;
296 }
297
static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
299 {
300 assert(irq_num >= 0);
301 assert(irq_num < bus->nirq);
302 bus->irq_count[irq_num] += change;
303 bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
304 }
305
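/*
 * Propagate an INTx level change towards the host bridge: each bus's
 * map_irq callback translates the pin number, and the walk stops at the
 * first bus that provides a set_irq handler, where the shared irq_count
 * for that line is updated.
 */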
static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
307 {
308 PCIBus *bus;
309 for (;;) {
310 int dev_irq = irq_num;
311 bus = pci_get_bus(pci_dev);
312 assert(bus->map_irq);
313 irq_num = bus->map_irq(pci_dev, irq_num);
314 trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
315 pci_bus_is_root(bus) ? "root-complex"
316 : DEVICE(bus->parent_dev)->canonical_path);
317 if (bus->set_irq)
318 break;
319 pci_dev = bus->parent_dev;
320 }
321 pci_bus_change_irq_level(bus, irq_num, change);
322 }
323
int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
325 {
326 assert(irq_num >= 0);
327 assert(irq_num < bus->nirq);
328 return !!bus->irq_count[irq_num];
329 }
330
331 /* Update interrupt status bit in config space on interrupt
332 * state change. */
static void pci_update_irq_status(PCIDevice *dev)
334 {
335 if (dev->irq_state) {
336 dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
337 } else {
338 dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
339 }
340 }
341
void pci_device_deassert_intx(PCIDevice *dev)
343 {
344 int i;
345 for (i = 0; i < PCI_NUM_PINS; ++i) {
346 pci_irq_handler(dev, i, 0);
347 }
348 }
349
static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
351 {
352 MemTxAttrs attrs = {};
353
354 /*
355 * Xen uses the high bits of the address to contain some of the bits
356 * of the PIRQ#. Therefore we can't just send the write cycle and
357 * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
359 * So we intercept the delivery here instead of in kvm_send_msi().
360 */
361 if (xen_mode == XEN_EMULATE &&
362 xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
363 return;
364 }
365 attrs.requester_id = pci_requester_id(dev);
366 address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
367 attrs, NULL);
368 }
369
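/*
 * Return every implemented BAR to its reset value: only the type bits
 * remain, i.e. the programmable address bits read back as zero.
 */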
static void pci_reset_regions(PCIDevice *dev)
371 {
372 int r;
373 if (pci_is_vf(dev)) {
374 return;
375 }
376
377 for (r = 0; r < PCI_NUM_REGIONS; ++r) {
378 PCIIORegion *region = &dev->io_regions[r];
379 if (!region->size) {
380 continue;
381 }
382
383 if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
384 region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
385 pci_set_quad(dev->config + pci_bar(dev, r), region->type);
386 } else {
387 pci_set_long(dev->config + pci_bar(dev, r), region->type);
388 }
389 }
390 }
391
static void pci_do_device_reset(PCIDevice *dev)
393 {
394 pci_device_deassert_intx(dev);
395 assert(dev->irq_state == 0);
396
397 /* Clear all writable bits */
398 pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
399 pci_get_word(dev->wmask + PCI_COMMAND) |
400 pci_get_word(dev->w1cmask + PCI_COMMAND));
401 pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
402 pci_get_word(dev->wmask + PCI_STATUS) |
403 pci_get_word(dev->w1cmask + PCI_STATUS));
404 /* Some devices make bits of PCI_INTERRUPT_LINE read only */
405 pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
406 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
407 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
408 dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
409 pci_reset_regions(dev);
410 pci_update_mappings(dev);
411
412 msi_reset(dev);
413 msix_reset(dev);
414 pcie_sriov_pf_reset(dev);
415 }
416
417 /*
418 * This function is called on #RST and FLR.
419 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
420 */
void pci_device_reset(PCIDevice *dev)
422 {
423 device_cold_reset(&dev->qdev);
424 pci_do_device_reset(dev);
425 }
426
427 /*
 * Trigger a PCI bus reset under a given bus.
 * Called via bus_cold_reset on RST# assert, after the devices
 * have already been reset with device_cold_reset.
431 */
static void pcibus_reset_hold(Object *obj, ResetType type)
433 {
434 PCIBus *bus = PCI_BUS(obj);
435 int i;
436
437 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
438 if (bus->devices[i]) {
439 pci_do_device_reset(bus->devices[i]);
440 }
441 }
442
443 for (i = 0; i < bus->nirq; i++) {
444 assert(bus->irq_count[i] == 0);
445 }
446 }
447
static void pci_host_bus_register(DeviceState *host)
449 {
450 PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
451
452 QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
453 }
454
static void pci_host_bus_unregister(DeviceState *host)
456 {
457 PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
458
459 QLIST_REMOVE(host_bridge, next);
460 }
461
PCIBus *pci_device_root_bus(const PCIDevice *d)
463 {
464 PCIBus *bus = pci_get_bus(d);
465
466 while (!pci_bus_is_root(bus)) {
467 d = bus->parent_dev;
468 assert(d != NULL);
469
470 bus = pci_get_bus(d);
471 }
472
473 return bus;
474 }
475
const char *pci_root_bus_path(PCIDevice *dev)
477 {
478 PCIBus *rootbus = pci_device_root_bus(dev);
479 PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
480 PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);
481
482 assert(host_bridge->bus == rootbus);
483
484 if (hc->root_bus_path) {
485 return (*hc->root_bus_path)(host_bridge, rootbus);
486 }
487
488 return rootbus->qbus.name;
489 }
490
bool pci_bus_bypass_iommu(PCIBus *bus)
492 {
493 PCIBus *rootbus = bus;
494 PCIHostState *host_bridge;
495
496 if (!pci_bus_is_root(bus)) {
497 rootbus = pci_device_root_bus(bus->parent_dev);
498 }
499
500 host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
501
502 assert(host_bridge->bus == rootbus);
503
504 return host_bridge->bypass_iommu;
505 }
506
static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
508 MemoryRegion *mem, MemoryRegion *io,
509 uint8_t devfn_min)
510 {
511 assert(PCI_FUNC(devfn_min) == 0);
512 bus->devfn_min = devfn_min;
513 bus->slot_reserved_mask = 0x0;
514 bus->address_space_mem = mem;
515 bus->address_space_io = io;
516 bus->flags |= PCI_BUS_IS_ROOT;
517
518 /* host bridge */
519 QLIST_INIT(&bus->child);
520
521 pci_host_bus_register(parent);
522 }
523
static void pci_bus_uninit(PCIBus *bus)
525 {
526 pci_host_bus_unregister(BUS(bus)->parent);
527 }
528
bool pci_bus_is_express(const PCIBus *bus)
530 {
531 return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
532 }
533
void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
535 const char *name,
536 MemoryRegion *mem, MemoryRegion *io,
537 uint8_t devfn_min, const char *typename)
538 {
539 qbus_init(bus, bus_size, typename, parent, name);
540 pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
541 }
542
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
544 MemoryRegion *mem, MemoryRegion *io,
545 uint8_t devfn_min, const char *typename)
546 {
547 PCIBus *bus;
548
549 bus = PCI_BUS(qbus_new(typename, parent, name));
550 pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
551 return bus;
552 }
553
void pci_root_bus_cleanup(PCIBus *bus)
555 {
556 pci_bus_uninit(bus);
557 /* the caller of the unplug hotplug handler will delete this device */
558 qbus_unrealize(BUS(bus));
559 }
560
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
562 void *irq_opaque, int nirq)
563 {
564 bus->set_irq = set_irq;
565 bus->irq_opaque = irq_opaque;
566 bus->nirq = nirq;
567 g_free(bus->irq_count);
568 bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
569 }
570
void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
572 {
573 bus->map_irq = map_irq;
574 }
575
void pci_bus_irqs_cleanup(PCIBus *bus)
577 {
578 bus->set_irq = NULL;
579 bus->map_irq = NULL;
580 bus->irq_opaque = NULL;
581 bus->nirq = 0;
582 g_free(bus->irq_count);
583 bus->irq_count = NULL;
584 }
585
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
587 pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
588 void *irq_opaque,
589 MemoryRegion *mem, MemoryRegion *io,
590 uint8_t devfn_min, int nirq,
591 const char *typename)
592 {
593 PCIBus *bus;
594
595 bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
596 pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
597 pci_bus_map_irqs(bus, map_irq);
598 return bus;
599 }
600
void pci_unregister_root_bus(PCIBus *bus)
602 {
603 pci_bus_irqs_cleanup(bus);
604 pci_root_bus_cleanup(bus);
605 }
606
int pci_bus_num(PCIBus *s)
608 {
609 return PCI_BUS_GET_CLASS(s)->bus_num(s);
610 }
611
612 /* Returns the min and max bus numbers of a PCI bus hierarchy */
void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
614 {
615 int i;
616 *min_bus = *max_bus = pci_bus_num(bus);
617
618 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
619 PCIDevice *dev = bus->devices[i];
620
621 if (dev && IS_PCI_BRIDGE(dev)) {
622 *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
623 *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
624 }
625 }
626 }
627
int pci_bus_numa_node(PCIBus *bus)
629 {
630 return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
631 }
632
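/*
 * Incoming migration: accept the config space sent by the source only
 * if every bit covered by cmask (expected-constant bits) and not
 * guest-writable matches what this device instance exposes.
 */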
static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
634 const VMStateField *field)
635 {
636 PCIDevice *s = container_of(pv, PCIDevice, config);
637 uint8_t *config;
638 int i;
639
640 assert(size == pci_config_size(s));
641 config = g_malloc(size);
642
643 qemu_get_buffer(f, config, size);
644 for (i = 0; i < size; ++i) {
645 if ((config[i] ^ s->config[i]) &
646 s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
647 error_report("%s: Bad config data: i=0x%x read: %x device: %x "
648 "cmask: %x wmask: %x w1cmask:%x", __func__,
649 i, config[i], s->config[i],
650 s->cmask[i], s->wmask[i], s->w1cmask[i]);
651 g_free(config);
652 return -EINVAL;
653 }
654 }
655 memcpy(s->config, config, size);
656
657 pci_update_mappings(s);
658 if (IS_PCI_BRIDGE(s)) {
659 pci_bridge_update_mappings(PCI_BRIDGE(s));
660 }
661
662 memory_region_set_enabled(&s->bus_master_enable_region,
663 pci_get_word(s->config + PCI_COMMAND)
664 & PCI_COMMAND_MASTER);
665
666 g_free(config);
667 return 0;
668 }
669
670 /* just put buffer */
static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
672 const VMStateField *field, JSONWriter *vmdesc)
673 {
674 const uint8_t **v = pv;
675 assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
676 qemu_put_buffer(f, *v, size);
677
678 return 0;
679 }
680
681 static const VMStateInfo vmstate_info_pci_config = {
682 .name = "pci config",
683 .get = get_pci_config_device,
684 .put = put_pci_config_device,
685 };
686
static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
688 const VMStateField *field)
689 {
690 PCIDevice *s = container_of(pv, PCIDevice, irq_state);
691 uint32_t irq_state[PCI_NUM_PINS];
692 int i;
693 for (i = 0; i < PCI_NUM_PINS; ++i) {
694 irq_state[i] = qemu_get_be32(f);
695 if (irq_state[i] != 0x1 && irq_state[i] != 0) {
696 fprintf(stderr, "irq state %d: must be 0 or 1.\n",
697 irq_state[i]);
698 return -EINVAL;
699 }
700 }
701
702 for (i = 0; i < PCI_NUM_PINS; ++i) {
703 pci_set_irq_state(s, i, irq_state[i]);
704 }
705
706 return 0;
707 }
708
static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
710 const VMStateField *field, JSONWriter *vmdesc)
711 {
712 int i;
713 PCIDevice *s = container_of(pv, PCIDevice, irq_state);
714
715 for (i = 0; i < PCI_NUM_PINS; ++i) {
716 qemu_put_be32(f, pci_irq_state(s, i));
717 }
718
719 return 0;
720 }
721
722 static const VMStateInfo vmstate_info_pci_irq_state = {
723 .name = "pci irq state",
724 .get = get_pci_irq_state,
725 .put = put_pci_irq_state,
726 };
727
static bool migrate_is_pcie(void *opaque, int version_id)
729 {
730 return pci_is_express((PCIDevice *)opaque);
731 }
732
static bool migrate_is_not_pcie(void *opaque, int version_id)
734 {
735 return !pci_is_express((PCIDevice *)opaque);
736 }
737
738 const VMStateDescription vmstate_pci_device = {
739 .name = "PCIDevice",
740 .version_id = 2,
741 .minimum_version_id = 1,
742 .fields = (const VMStateField[]) {
743 VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
744 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
745 migrate_is_not_pcie,
746 0, vmstate_info_pci_config,
747 PCI_CONFIG_SPACE_SIZE),
748 VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
749 migrate_is_pcie,
750 0, vmstate_info_pci_config,
751 PCIE_CONFIG_SPACE_SIZE),
752 VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
753 vmstate_info_pci_irq_state,
754 PCI_NUM_PINS * sizeof(int32_t)),
755 VMSTATE_END_OF_LIST()
756 }
757 };
758
759
void pci_device_save(PCIDevice *s, QEMUFile *f)
761 {
762 /* Clear interrupt status bit: it is implicit
763 * in irq_state which we are saving.
764 * This makes us compatible with old devices
765 * which never set or clear this bit. */
766 s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
767 vmstate_save_state(f, &vmstate_pci_device, s, NULL);
768 /* Restore the interrupt status bit. */
769 pci_update_irq_status(s);
770 }
771
int pci_device_load(PCIDevice *s, QEMUFile *f)
773 {
774 int ret;
775 ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
776 /* Restore the interrupt status bit. */
777 pci_update_irq_status(s);
778 return ret;
779 }
780
static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
782 {
783 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
784 pci_default_sub_vendor_id);
785 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
786 pci_default_sub_device_id);
787 }
788
789 /*
 * Parse [[<domain>:]<bus>:]<slot> when funcp == NULL, or
 *       [[<domain>:]<bus>:]<slot>.<func> otherwise; return -1 on error.
792 */
static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
794 unsigned int *slotp, unsigned int *funcp)
795 {
796 const char *p;
797 char *e;
798 unsigned long val;
799 unsigned long dom = 0, bus = 0;
800 unsigned int slot = 0;
801 unsigned int func = 0;
802
803 p = addr;
804 val = strtoul(p, &e, 16);
805 if (e == p)
806 return -1;
807 if (*e == ':') {
808 bus = val;
809 p = e + 1;
810 val = strtoul(p, &e, 16);
811 if (e == p)
812 return -1;
813 if (*e == ':') {
814 dom = bus;
815 bus = val;
816 p = e + 1;
817 val = strtoul(p, &e, 16);
818 if (e == p)
819 return -1;
820 }
821 }
822
823 slot = val;
824
825 if (funcp != NULL) {
826 if (*e != '.')
827 return -1;
828
829 p = e + 1;
830 val = strtoul(p, &e, 16);
831 if (e == p)
832 return -1;
833
834 func = val;
835 }
836
837 /* if funcp == NULL func is 0 */
838 if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
839 return -1;
840
841 if (*e)
842 return -1;
843
844 *domp = dom;
845 *busp = bus;
846 *slotp = slot;
847 if (funcp != NULL)
848 *funcp = func;
849 return 0;
850 }
851
static void pci_init_cmask(PCIDevice *dev)
853 {
854 pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
855 pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
856 dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
857 dev->cmask[PCI_REVISION_ID] = 0xff;
858 dev->cmask[PCI_CLASS_PROG] = 0xff;
859 pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
860 dev->cmask[PCI_HEADER_TYPE] = 0xff;
861 dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
862 }
863
static void pci_init_wmask(PCIDevice *dev)
865 {
866 int config_size = pci_config_size(dev);
867
868 dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
869 dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
870 pci_set_word(dev->wmask + PCI_COMMAND,
871 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
872 PCI_COMMAND_INTX_DISABLE);
873 pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
874
875 memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
876 config_size - PCI_CONFIG_HEADER_SIZE);
877 }
878
static void pci_init_w1cmask(PCIDevice *dev)
880 {
881 /*
882 * Note: It's okay to set w1cmask even for readonly bits as
883 * long as their value is hardwired to 0.
884 */
885 pci_set_word(dev->w1cmask + PCI_STATUS,
886 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
887 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
888 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
889 }
890
static void pci_init_mask_bridge(PCIDevice *d)
892 {
893 /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
894 PCI_SEC_LATENCY_TIMER */
895 memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);
896
897 /* base and limit */
898 d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
899 d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
900 pci_set_word(d->wmask + PCI_MEMORY_BASE,
901 PCI_MEMORY_RANGE_MASK & 0xffff);
902 pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
903 PCI_MEMORY_RANGE_MASK & 0xffff);
904 pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
905 PCI_PREF_RANGE_MASK & 0xffff);
906 pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
907 PCI_PREF_RANGE_MASK & 0xffff);
908
909 /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
910 memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);
911
912 /* Supported memory and i/o types */
913 d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
914 d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
915 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
916 PCI_PREF_RANGE_TYPE_64);
917 pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
918 PCI_PREF_RANGE_TYPE_64);
919
920 /*
921 * TODO: Bridges default to 10-bit VGA decoding but we currently only
922 * implement 16-bit decoding (no alias support).
923 */
924 pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
925 PCI_BRIDGE_CTL_PARITY |
926 PCI_BRIDGE_CTL_SERR |
927 PCI_BRIDGE_CTL_ISA |
928 PCI_BRIDGE_CTL_VGA |
929 PCI_BRIDGE_CTL_VGA_16BIT |
930 PCI_BRIDGE_CTL_MASTER_ABORT |
931 PCI_BRIDGE_CTL_BUS_RESET |
932 PCI_BRIDGE_CTL_FAST_BACK |
933 PCI_BRIDGE_CTL_DISCARD |
934 PCI_BRIDGE_CTL_SEC_DISCARD |
935 PCI_BRIDGE_CTL_DISCARD_SERR);
    /* The below has no effect as we never set this bit; kept here for
     * completeness. */
938 pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
939 PCI_BRIDGE_CTL_DISCARD_STATUS);
940 d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
941 d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
942 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
943 PCI_PREF_RANGE_TYPE_MASK);
944 pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
945 PCI_PREF_RANGE_TYPE_MASK);
946 }
947
static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
949 {
950 uint8_t slot = PCI_SLOT(dev->devfn);
951 uint8_t func;
952
953 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
954 dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
955 }
956
957 /*
     * With SR-IOV and ARI, a device at function 0 need not be a multifunction
     * device, as it may just be a VF that ended up with function 0 in
     * the legacy PCI interpretation. Avoid failing in such cases:
961 */
962 if (pci_is_vf(dev) &&
963 dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
964 return;
965 }
966
967 /*
     * The multifunction bit is interpreted in two ways:
     *   - all functions must set the bit to 1.
     *     Example: Intel X53
     *   - function 0 must set the bit, but the remaining functions (> 0)
     *     are allowed to leave the bit at 0.
     *     Example: PIIX3 (also in qemu), PIIX4 (also in qemu), ICH10
     *
     * So the OS (at least Linux) checks the bit only on function 0
     * and ignores the bit on functions > 0.
     *
     * The check below allows both interpretations.
979 */
980 if (PCI_FUNC(dev->devfn)) {
981 PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
982 if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
983 /* function 0 should set multifunction bit */
984 error_setg(errp, "PCI: single function device can't be populated "
985 "in function %x.%x", slot, PCI_FUNC(dev->devfn));
986 return;
987 }
988 return;
989 }
990
991 if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
992 return;
993 }
994 /* function 0 indicates single function, so function > 0 must be NULL */
995 for (func = 1; func < PCI_FUNC_MAX; ++func) {
996 if (bus->devices[PCI_DEVFN(slot, func)]) {
997 error_setg(errp, "PCI: %x.0 indicates single function, "
998 "but %x.%x is already populated.",
999 slot, slot, func);
1000 return;
1001 }
1002 }
1003 }
1004
static void pci_config_alloc(PCIDevice *pci_dev)
1006 {
1007 int config_size = pci_config_size(pci_dev);
1008
1009 pci_dev->config = g_malloc0(config_size);
1010 pci_dev->cmask = g_malloc0(config_size);
1011 pci_dev->wmask = g_malloc0(config_size);
1012 pci_dev->w1cmask = g_malloc0(config_size);
1013 pci_dev->used = g_malloc0(config_size);
1014 }
1015
static void pci_config_free(PCIDevice *pci_dev)
1017 {
1018 g_free(pci_dev->config);
1019 g_free(pci_dev->cmask);
1020 g_free(pci_dev->wmask);
1021 g_free(pci_dev->w1cmask);
1022 g_free(pci_dev->used);
1023 }
1024
static void do_pci_unregister_device(PCIDevice *pci_dev)
1026 {
1027 pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
1028 pci_config_free(pci_dev);
1029
1030 if (xen_mode == XEN_EMULATE) {
1031 xen_evtchn_remove_pci_device(pci_dev);
1032 }
1033 if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
1034 memory_region_del_subregion(&pci_dev->bus_master_container_region,
1035 &pci_dev->bus_master_enable_region);
1036 }
1037 address_space_destroy(&pci_dev->bus_master_as);
1038 }
1039
1040 /* Extract PCIReqIDCache into BDF format */
static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
1042 {
1043 uint8_t bus_n;
1044 uint16_t result;
1045
1046 switch (cache->type) {
1047 case PCI_REQ_ID_BDF:
1048 result = pci_get_bdf(cache->dev);
1049 break;
1050 case PCI_REQ_ID_SECONDARY_BUS:
1051 bus_n = pci_dev_bus_num(cache->dev);
1052 result = PCI_BUILD_BDF(bus_n, 0);
1053 break;
1054 default:
1055 error_report("Invalid PCI requester ID cache type: %d",
1056 cache->type);
1057 exit(1);
1058 break;
1059 }
1060
1061 return result;
1062 }
1063
/* Walk the bridges up to the root complex and return the requester ID
 * cache for a specific device. For a pure PCIe topology, the cached
 * result is exactly the same as the device's BDF. However, several
 * tricks are required when the system mixes legacy PCI devices with
 * PCIe-to-PCI bridges.
 *
 * Here we cache the proxy device (and type) rather than the requester
 * ID, since bus numbers may change from time to time.
1072 */
static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
1074 {
1075 PCIDevice *parent;
1076 PCIReqIDCache cache = {
1077 .dev = dev,
1078 .type = PCI_REQ_ID_BDF,
1079 };
1080
1081 while (!pci_bus_is_root(pci_get_bus(dev))) {
1082 /* We are under PCI/PCIe bridges */
1083 parent = pci_get_bus(dev)->parent_dev;
1084 if (pci_is_express(parent)) {
1085 if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
1086 /* When we pass through PCIe-to-PCI/PCIX bridges, we
1087 * override the requester ID using secondary bus
1088 * number of parent bridge with zeroed devfn
1089 * (pcie-to-pci bridge spec chap 2.3). */
1090 cache.type = PCI_REQ_ID_SECONDARY_BUS;
1091 cache.dev = dev;
1092 }
1093 } else {
1094 /* Legacy PCI, override requester ID with the bridge's
1095 * BDF upstream. When the root complex connects to
1096 * legacy PCI devices (including buses), it can only
1097 * obtain requester ID info from directly attached
1098 * devices. If devices are attached under bridges, only
1099 * the requester ID of the bridge that is directly
1100 * attached to the root complex can be recognized. */
1101 cache.type = PCI_REQ_ID_BDF;
1102 cache.dev = parent;
1103 }
1104 dev = parent;
1105 }
1106
1107 return cache;
1108 }
1109
uint16_t pci_requester_id(PCIDevice *dev)
1111 {
1112 return pci_req_id_cache_extract(&dev->requester_id_cache);
1113 }
1114
static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
1116 {
1117 return !(bus->devices[devfn]);
1118 }
1119
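/* slot_reserved_mask is indexed by slot, so one bit covers all eight
 * functions of that slot. */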
static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
1121 {
1122 return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
1123 }
1124
uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
1126 {
1127 return bus->slot_reserved_mask;
1128 }
1129
void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
1131 {
1132 bus->slot_reserved_mask |= mask;
1133 }
1134
void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
1136 {
1137 bus->slot_reserved_mask &= ~mask;
1138 }
1139
1140 /* -1 for devfn means auto assign */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
1142 const char *name, int devfn,
1143 Error **errp)
1144 {
1145 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
1146 PCIConfigReadFunc *config_read = pc->config_read;
1147 PCIConfigWriteFunc *config_write = pc->config_write;
1148 Error *local_err = NULL;
1149 DeviceState *dev = DEVICE(pci_dev);
1150 PCIBus *bus = pci_get_bus(pci_dev);
1151 bool is_bridge = IS_PCI_BRIDGE(pci_dev);
1152
1153 /* Only pci bridges can be attached to extra PCI root buses */
1154 if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
1155 error_setg(errp,
1156 "PCI: Only PCI/PCIe bridges can be plugged into %s",
1157 bus->parent_dev->name);
1158 return NULL;
1159 }
1160
1161 if (devfn < 0) {
1162 for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
1163 devfn += PCI_FUNC_MAX) {
1164 if (pci_bus_devfn_available(bus, devfn) &&
1165 !pci_bus_devfn_reserved(bus, devfn)) {
1166 goto found;
1167 }
1168 }
1169 error_setg(errp, "PCI: no slot/function available for %s, all in use "
1170 "or reserved", name);
1171 return NULL;
1172 found: ;
1173 } else if (pci_bus_devfn_reserved(bus, devfn)) {
1174 error_setg(errp, "PCI: slot %d function %d not available for %s,"
1175 " reserved",
1176 PCI_SLOT(devfn), PCI_FUNC(devfn), name);
1177 return NULL;
1178 } else if (!pci_bus_devfn_available(bus, devfn)) {
1179 error_setg(errp, "PCI: slot %d function %d not available for %s,"
1180 " in use by %s,id=%s",
1181 PCI_SLOT(devfn), PCI_FUNC(devfn), name,
1182 bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
1183 return NULL;
1184 } /*
1185 * Populating function 0 triggers a scan from the guest that
1186 * exposes other non-zero functions. Hence we need to ensure that
1187 * function 0 wasn't added yet.
1188 */
1189 else if (dev->hotplugged &&
1190 !pci_is_vf(pci_dev) &&
1191 pci_get_function_0(pci_dev)) {
1192 error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
1193 " new func %s cannot be exposed to guest.",
1194 PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
1195 pci_get_function_0(pci_dev)->name,
1196 name);
1197
1198 return NULL;
1199 }
1200
1201 pci_dev->devfn = devfn;
1202 pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
1203 pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);
1204
1205 memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
1206 "bus master container", UINT64_MAX);
1207 address_space_init(&pci_dev->bus_master_as,
1208 &pci_dev->bus_master_container_region, pci_dev->name);
1209 pci_dev->bus_master_as.max_bounce_buffer_size =
1210 pci_dev->max_bounce_buffer_size;
1211
1212 if (phase_check(PHASE_MACHINE_READY)) {
1213 pci_init_bus_master(pci_dev);
1214 }
1215 pci_dev->irq_state = 0;
1216 pci_config_alloc(pci_dev);
1217
1218 pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
1219 pci_config_set_device_id(pci_dev->config, pc->device_id);
1220 pci_config_set_revision(pci_dev->config, pc->revision);
1221 pci_config_set_class(pci_dev->config, pc->class_id);
1222
1223 if (!is_bridge) {
1224 if (pc->subsystem_vendor_id || pc->subsystem_id) {
1225 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
1226 pc->subsystem_vendor_id);
1227 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
1228 pc->subsystem_id);
1229 } else {
1230 pci_set_default_subsystem_id(pci_dev);
1231 }
1232 } else {
1233 /* subsystem_vendor_id/subsystem_id are only for header type 0 */
1234 assert(!pc->subsystem_vendor_id);
1235 assert(!pc->subsystem_id);
1236 }
1237 pci_init_cmask(pci_dev);
1238 pci_init_wmask(pci_dev);
1239 pci_init_w1cmask(pci_dev);
1240 if (is_bridge) {
1241 pci_init_mask_bridge(pci_dev);
1242 }
1243 pci_init_multifunction(bus, pci_dev, &local_err);
1244 if (local_err) {
1245 error_propagate(errp, local_err);
1246 do_pci_unregister_device(pci_dev);
1247 return NULL;
1248 }
1249
1250 if (!config_read)
1251 config_read = pci_default_read_config;
1252 if (!config_write)
1253 config_write = pci_default_write_config;
1254 pci_dev->config_read = config_read;
1255 pci_dev->config_write = config_write;
1256 bus->devices[devfn] = pci_dev;
1257 pci_dev->version_id = 2; /* Current pci device vmstate version */
1258 return pci_dev;
1259 }
1260
static void pci_unregister_io_regions(PCIDevice *pci_dev)
1262 {
1263 PCIIORegion *r;
1264 int i;
1265
1266 for(i = 0; i < PCI_NUM_REGIONS; i++) {
1267 r = &pci_dev->io_regions[i];
1268 if (!r->size || r->addr == PCI_BAR_UNMAPPED)
1269 continue;
1270 memory_region_del_subregion(r->address_space, r->memory);
1271 }
1272
1273 pci_unregister_vga(pci_dev);
1274 }
1275
static void pci_qdev_unrealize(DeviceState *dev)
1277 {
1278 PCIDevice *pci_dev = PCI_DEVICE(dev);
1279 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
1280
1281 pci_unregister_io_regions(pci_dev);
1282 pci_del_option_rom(pci_dev);
1283
1284 if (pc->exit) {
1285 pc->exit(pci_dev);
1286 }
1287
1288 pci_device_deassert_intx(pci_dev);
1289 do_pci_unregister_device(pci_dev);
1290
1291 pci_dev->msi_trigger = NULL;
1292
1293 /*
     * clean up acpi-index so it can be reused by another device
1295 */
1296 if (pci_dev->acpi_index) {
1297 GSequence *used_indexes = pci_acpi_index_list();
1298
1299 g_sequence_remove(g_sequence_lookup(used_indexes,
1300 GINT_TO_POINTER(pci_dev->acpi_index),
1301 g_cmp_uint32, NULL));
1302 }
1303 }
1304
void pci_register_bar(PCIDevice *pci_dev, int region_num,
1306 uint8_t type, MemoryRegion *memory)
1307 {
1308 PCIIORegion *r;
1309 uint32_t addr; /* offset in pci config space */
1310 uint64_t wmask;
1311 pcibus_t size = memory_region_size(memory);
1312 uint8_t hdr_type;
1313
1314 assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
1315 assert(region_num >= 0);
1316 assert(region_num < PCI_NUM_REGIONS);
1317 assert(is_power_of_2(size));
1318
1319 /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
1320 hdr_type =
1321 pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
1322 assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);
1323
1324 r = &pci_dev->io_regions[region_num];
1325 r->addr = PCI_BAR_UNMAPPED;
1326 r->size = size;
1327 r->type = type;
1328 r->memory = memory;
1329 r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
1330 ? pci_get_bus(pci_dev)->address_space_io
1331 : pci_get_bus(pci_dev)->address_space_mem;
1332
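    /*
     * Address bits below the (power-of-two) BAR size are hard-wired to
     * zero; e.g. a 4 KiB BAR gets a write mask of ~0xfff. Writing
     * all-ones and reading the value back is how guests size a BAR.
     */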
1333 wmask = ~(size - 1);
1334 if (region_num == PCI_ROM_SLOT) {
1335 /* ROM enable bit is writable */
1336 wmask |= PCI_ROM_ADDRESS_ENABLE;
1337 }
1338
1339 addr = pci_bar(pci_dev, region_num);
1340 pci_set_long(pci_dev->config + addr, type);
1341
1342 if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
1343 r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1344 pci_set_quad(pci_dev->wmask + addr, wmask);
1345 pci_set_quad(pci_dev->cmask + addr, ~0ULL);
1346 } else {
1347 pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
1348 pci_set_long(pci_dev->cmask + addr, 0xffffffff);
1349 }
1350 }
1351
static void pci_update_vga(PCIDevice *pci_dev)
1353 {
1354 uint16_t cmd;
1355
1356 if (!pci_dev->has_vga) {
1357 return;
1358 }
1359
1360 cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
1361
1362 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
1363 cmd & PCI_COMMAND_MEMORY);
1364 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
1365 cmd & PCI_COMMAND_IO);
1366 memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
1367 cmd & PCI_COMMAND_IO);
1368 }
1369
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
1371 MemoryRegion *io_lo, MemoryRegion *io_hi)
1372 {
1373 PCIBus *bus = pci_get_bus(pci_dev);
1374
1375 assert(!pci_dev->has_vga);
1376
1377 assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
1378 pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
1379 memory_region_add_subregion_overlap(bus->address_space_mem,
1380 QEMU_PCI_VGA_MEM_BASE, mem, 1);
1381
1382 assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
1383 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
1384 memory_region_add_subregion_overlap(bus->address_space_io,
1385 QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);
1386
1387 assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
1388 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
1389 memory_region_add_subregion_overlap(bus->address_space_io,
1390 QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
1391 pci_dev->has_vga = true;
1392
1393 pci_update_vga(pci_dev);
1394 }
1395
void pci_unregister_vga(PCIDevice *pci_dev)
1397 {
1398 PCIBus *bus = pci_get_bus(pci_dev);
1399
1400 if (!pci_dev->has_vga) {
1401 return;
1402 }
1403
1404 memory_region_del_subregion(bus->address_space_mem,
1405 pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
1406 memory_region_del_subregion(bus->address_space_io,
1407 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
1408 memory_region_del_subregion(bus->address_space_io,
1409 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
1410 pci_dev->has_vga = false;
1411 }
1412
pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
1414 {
1415 return pci_dev->io_regions[region_num].addr;
1416 }
1417
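/*
 * Read the current BAR base from config space. For an SR-IOV virtual
 * function the base is derived from the PF's VF BAR plus vf_num * size,
 * where vf_num follows from PCI_SRIOV_VF_OFFSET and PCI_SRIOV_VF_STRIDE.
 */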
static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
1419 uint8_t type, pcibus_t size)
1420 {
1421 pcibus_t new_addr;
1422 if (!pci_is_vf(d)) {
1423 int bar = pci_bar(d, reg);
1424 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1425 new_addr = pci_get_quad(d->config + bar);
1426 } else {
1427 new_addr = pci_get_long(d->config + bar);
1428 }
1429 } else {
1430 PCIDevice *pf = d->exp.sriov_vf.pf;
1431 uint16_t sriov_cap = pf->exp.sriov_cap;
1432 int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
1433 uint16_t vf_offset =
1434 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
1435 uint16_t vf_stride =
1436 pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
1437 uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;
1438
1439 if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1440 new_addr = pci_get_quad(pf->config + bar);
1441 } else {
1442 new_addr = pci_get_long(pf->config + bar);
1443 }
1444 new_addr += vf_num * size;
1445 }
1446 /* The ROM slot has a specific enable bit, keep it intact */
1447 if (reg != PCI_ROM_SLOT) {
1448 new_addr &= ~(size - 1);
1449 }
1450 return new_addr;
1451 }
1452
pcibus_t pci_bar_address(PCIDevice *d,
1454 int reg, uint8_t type, pcibus_t size)
1455 {
1456 pcibus_t new_addr, last_addr;
1457 uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
1458 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
1459 bool allow_0_address = mc->pci_allow_0_address;
1460
1461 if (type & PCI_BASE_ADDRESS_SPACE_IO) {
1462 if (!(cmd & PCI_COMMAND_IO)) {
1463 return PCI_BAR_UNMAPPED;
1464 }
1465 new_addr = pci_config_get_bar_addr(d, reg, type, size);
1466 last_addr = new_addr + size - 1;
1467 /* Check if 32 bit BAR wraps around explicitly.
1468 * TODO: make priorities correct and remove this work around.
1469 */
1470 if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
1471 (!allow_0_address && new_addr == 0)) {
1472 return PCI_BAR_UNMAPPED;
1473 }
1474 return new_addr;
1475 }
1476
1477 if (!(cmd & PCI_COMMAND_MEMORY)) {
1478 return PCI_BAR_UNMAPPED;
1479 }
1480 new_addr = pci_config_get_bar_addr(d, reg, type, size);
1481 /* the ROM slot has a specific enable bit */
1482 if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
1483 return PCI_BAR_UNMAPPED;
1484 }
1485 new_addr &= ~(size - 1);
1486 last_addr = new_addr + size - 1;
1487 /* NOTE: we do not support wrapping */
1488 /* XXX: as we cannot support really dynamic
1489 mappings, we handle specific values as invalid
1490 mappings. */
1491 if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
1492 (!allow_0_address && new_addr == 0)) {
1493 return PCI_BAR_UNMAPPED;
1494 }
1495
1496 /* Now pcibus_t is 64bit.
1497 * Check if 32 bit BAR wraps around explicitly.
1498 * Without this, PC ide doesn't work well.
1499 * TODO: remove this work around.
1500 */
1501 if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
1502 return PCI_BAR_UNMAPPED;
1503 }
1504
1505 /*
     * The OS is allowed to set a BAR beyond its addressable
     * bits. For example, a 32 bit OS can set a 64 bit BAR
     * to >4G. Check for that. TODO: we might need to support
     * it in the future, e.g. for PAE.
1510 */
1511 if (last_addr >= HWADDR_MAX) {
1512 return PCI_BAR_UNMAPPED;
1513 }
1514
1515 return new_addr;
1516 }
1517
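/*
 * Re-evaluate all BARs after a config space or power change and move the
 * backing memory regions to their new addresses, unmapping any BAR that
 * is currently disabled.
 */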
static void pci_update_mappings(PCIDevice *d)
1519 {
1520 PCIIORegion *r;
1521 int i;
1522 pcibus_t new_addr;
1523
1524 for(i = 0; i < PCI_NUM_REGIONS; i++) {
1525 r = &d->io_regions[i];
1526
1527 /* this region isn't registered */
1528 if (!r->size)
1529 continue;
1530
1531 new_addr = pci_bar_address(d, i, r->type, r->size);
1532 if (!d->has_power) {
1533 new_addr = PCI_BAR_UNMAPPED;
1534 }
1535
1536 /* This bar isn't changed */
1537 if (new_addr == r->addr)
1538 continue;
1539
1540 /* now do the real mapping */
1541 if (r->addr != PCI_BAR_UNMAPPED) {
1542 trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
1543 PCI_SLOT(d->devfn),
1544 PCI_FUNC(d->devfn),
1545 i, r->addr, r->size);
1546 memory_region_del_subregion(r->address_space, r->memory);
1547 }
1548 r->addr = new_addr;
1549 if (r->addr != PCI_BAR_UNMAPPED) {
1550 trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
1551 PCI_SLOT(d->devfn),
1552 PCI_FUNC(d->devfn),
1553 i, r->addr, r->size);
1554 memory_region_add_subregion_overlap(r->address_space,
1555 r->addr, r->memory, 1);
1556 }
1557 }
1558
1559 pci_update_vga(d);
1560 }
1561
static inline int pci_irq_disabled(PCIDevice *d)
1563 {
1564 return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
1565 }
1566
1567 /* Called after interrupt disabled field update in config space,
1568 * assert/deassert interrupts if necessary.
1569 * Gets original interrupt disable bit value (before update). */
static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
1571 {
1572 int i, disabled = pci_irq_disabled(d);
1573 if (disabled == was_irq_disabled)
1574 return;
1575 for (i = 0; i < PCI_NUM_PINS; ++i) {
1576 int state = pci_irq_state(d, i);
1577 pci_change_irq_level(d, i, disabled ? -state : state);
1578 }
1579 }
1580
uint32_t pci_default_read_config(PCIDevice *d,
1582 uint32_t address, int len)
1583 {
1584 uint32_t val = 0;
1585
1586 assert(address + len <= pci_config_size(d));
1587
1588 if (pci_is_express_downstream_port(d) &&
1589 ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
1590 pcie_sync_bridge_lnk(d);
1591 }
1592 memcpy(&val, d->config + address, len);
1593 return le32_to_cpu(val);
1594 }
1595
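/*
 * Default config space write handler: each byte is merged under wmask
 * (read/write bits) and w1cmask (write-1-to-clear bits), after which
 * side effects such as BAR remapping, INTx disable and bus master
 * enable are applied.
 */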
void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
1597 {
1598 int i, was_irq_disabled = pci_irq_disabled(d);
1599 uint32_t val = val_in;
1600
1601 assert(addr + l <= pci_config_size(d));
1602
1603 for (i = 0; i < l; val >>= 8, ++i) {
1604 uint8_t wmask = d->wmask[addr + i];
1605 uint8_t w1cmask = d->w1cmask[addr + i];
1606 assert(!(wmask & w1cmask));
1607 d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
1608 d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
1609 }
1610 if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
1611 ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
1612 ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
1613 range_covers_byte(addr, l, PCI_COMMAND))
1614 pci_update_mappings(d);
1615
1616 if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
1617 pci_update_irq_disabled(d, was_irq_disabled);
1618 memory_region_set_enabled(&d->bus_master_enable_region,
1619 (pci_get_word(d->config + PCI_COMMAND)
1620 & PCI_COMMAND_MASTER) && d->has_power);
1621 }
1622
1623 msi_write_config(d, addr, val_in, l);
1624 msix_write_config(d, addr, val_in, l);
1625 pcie_sriov_config_write(d, addr, val_in, l);
1626 }
1627
1628 /***********************************************************/
1629 /* generic PCI irq support */
1630
1631 /* 0 <= irq_num <= 3. level must be 0 or 1 */
static void pci_irq_handler(void *opaque, int irq_num, int level)
1633 {
1634 PCIDevice *pci_dev = opaque;
1635 int change;
1636
1637 assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
1638 assert(level == 0 || level == 1);
1639 change = level - pci_irq_state(pci_dev, irq_num);
1640 if (!change)
1641 return;
1642
1643 pci_set_irq_state(pci_dev, irq_num, level);
1644 pci_update_irq_status(pci_dev);
1645 if (pci_irq_disabled(pci_dev))
1646 return;
1647 pci_change_irq_level(pci_dev, irq_num, change);
1648 }
1649
qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
1651 {
1652 int intx = pci_intx(pci_dev);
1653 assert(0 <= intx && intx < PCI_NUM_PINS);
1654
1655 return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
1656 }
1657
void pci_set_irq(PCIDevice *pci_dev, int level)
1659 {
1660 int intx = pci_intx(pci_dev);
1661 pci_irq_handler(pci_dev, intx, level);
1662 }
1663
1664 /* Special hooks used by device assignment */
void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
1666 {
1667 assert(pci_bus_is_root(bus));
1668 bus->route_intx_to_irq = route_intx_to_irq;
1669 }
1670
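/*
 * Resolve the host-side route of a device's INTx pin by swizzling the
 * pin through every bridge up to the root bus and then asking that
 * bus's route_intx_to_irq hook.
 */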
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
1672 {
1673 PCIBus *bus;
1674
1675 do {
1676 int dev_irq = pin;
1677 bus = pci_get_bus(dev);
1678 pin = bus->map_irq(dev, pin);
1679 trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
1680 pci_bus_is_root(bus) ? "root-complex"
1681 : DEVICE(bus->parent_dev)->canonical_path);
1682 dev = bus->parent_dev;
1683 } while (dev);
1684
1685 if (!bus->route_intx_to_irq) {
1686 error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
1687 object_get_typename(OBJECT(bus->qbus.parent)));
1688 return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
1689 }
1690
1691 return bus->route_intx_to_irq(bus->irq_opaque, pin);
1692 }
1693
bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
1695 {
1696 return old->mode != new->mode || old->irq != new->irq;
1697 }
1698
void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
1700 {
1701 PCIDevice *dev;
1702 PCIBus *sec;
1703 int i;
1704
1705 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
1706 dev = bus->devices[i];
1707 if (dev && dev->intx_routing_notifier) {
1708 dev->intx_routing_notifier(dev);
1709 }
1710 }
1711
1712 QLIST_FOREACH(sec, &bus->child, sibling) {
1713 pci_bus_fire_intx_routing_notifier(sec);
1714 }
1715 }
1716
void pci_device_set_intx_routing_notifier(PCIDevice *dev,
1718 PCIINTxRoutingNotifier notifier)
1719 {
1720 dev->intx_routing_notifier = notifier;
1721 }
1722
1723 /*
1724 * PCI-to-PCI bridge specification
1725 * 9.1: Interrupt routing. Table 9-1
1726 *
1727 * the PCI Express Base Specification, Revision 2.1
1728 * 2.2.8.1: INTx interrupt signaling - Rules
1729 * the Implementation Note
1730 * Table 2-20
1731 */
1732 /*
1733 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
1734 * 0-origin unlike PCI interrupt pin register.
1735 */
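/*
 * Example: with the standard (slot + pin) % PCI_NUM_PINS swizzle, a device
 * in slot 2 asserting INTB (pin 1) appears as INTD (pin 3) on the parent bus.
 */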
1736 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
1737 {
1738 return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
1739 }
1740
1741 /***********************************************************/
1742 /* monitor info on PCI */
1743
1744 static const pci_class_desc pci_class_descriptions[] =
1745 {
1746 { 0x0001, "VGA controller", "display"},
1747 { 0x0100, "SCSI controller", "scsi"},
1748 { 0x0101, "IDE controller", "ide"},
1749 { 0x0102, "Floppy controller", "fdc"},
1750 { 0x0103, "IPI controller", "ipi"},
1751 { 0x0104, "RAID controller", "raid"},
1752 { 0x0106, "SATA controller"},
1753 { 0x0107, "SAS controller"},
1754 { 0x0180, "Storage controller"},
1755 { 0x0200, "Ethernet controller", "ethernet"},
1756 { 0x0201, "Token Ring controller", "token-ring"},
1757 { 0x0202, "FDDI controller", "fddi"},
1758 { 0x0203, "ATM controller", "atm"},
1759 { 0x0280, "Network controller"},
1760 { 0x0300, "VGA controller", "display", 0x00ff},
1761 { 0x0301, "XGA controller"},
1762 { 0x0302, "3D controller"},
1763 { 0x0380, "Display controller"},
1764 { 0x0400, "Video controller", "video"},
1765 { 0x0401, "Audio controller", "sound"},
1766 { 0x0402, "Phone"},
1767 { 0x0403, "Audio controller", "sound"},
1768 { 0x0480, "Multimedia controller"},
1769 { 0x0500, "RAM controller", "memory"},
1770 { 0x0501, "Flash controller", "flash"},
1771 { 0x0580, "Memory controller"},
1772 { 0x0600, "Host bridge", "host"},
1773 { 0x0601, "ISA bridge", "isa"},
1774 { 0x0602, "EISA bridge", "eisa"},
1775 { 0x0603, "MC bridge", "mca"},
1776 { 0x0604, "PCI bridge", "pci-bridge"},
1777 { 0x0605, "PCMCIA bridge", "pcmcia"},
1778 { 0x0606, "NUBUS bridge", "nubus"},
1779 { 0x0607, "CARDBUS bridge", "cardbus"},
1780 { 0x0608, "RACEWAY bridge"},
1781 { 0x0680, "Bridge"},
1782 { 0x0700, "Serial port", "serial"},
1783 { 0x0701, "Parallel port", "parallel"},
1784 { 0x0800, "Interrupt controller", "interrupt-controller"},
1785 { 0x0801, "DMA controller", "dma-controller"},
1786 { 0x0802, "Timer", "timer"},
1787 { 0x0803, "RTC", "rtc"},
1788 { 0x0900, "Keyboard", "keyboard"},
1789 { 0x0901, "Pen", "pen"},
1790 { 0x0902, "Mouse", "mouse"},
1791 { 0x0A00, "Dock station", "dock", 0x00ff},
1792 { 0x0B00, "i386 cpu", "cpu", 0x00ff},
1793 { 0x0c00, "Firewire controller", "firewire"},
1794 { 0x0c01, "Access bus controller", "access-bus"},
1795 { 0x0c02, "SSA controller", "ssa"},
1796 { 0x0c03, "USB controller", "usb"},
1797 { 0x0c04, "Fibre channel controller", "fibre-channel"},
1798 { 0x0c05, "SMBus"},
1799 { 0, NULL}
1800 };
1801
1802 void pci_for_each_device_under_bus_reverse(PCIBus *bus,
1803 pci_bus_dev_fn fn,
1804 void *opaque)
1805 {
1806 PCIDevice *d;
1807 int devfn;
1808
1809 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
1810 d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
1811 if (d) {
1812 fn(bus, d, opaque);
1813 }
1814 }
1815 }
1816
1817 void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
1818 pci_bus_dev_fn fn, void *opaque)
1819 {
1820 bus = pci_find_bus_nr(bus, bus_num);
1821
1822 if (bus) {
1823 pci_for_each_device_under_bus_reverse(bus, fn, opaque);
1824 }
1825 }
1826
1827 void pci_for_each_device_under_bus(PCIBus *bus,
1828 pci_bus_dev_fn fn, void *opaque)
1829 {
1830 PCIDevice *d;
1831 int devfn;
1832
1833 for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
1834 d = bus->devices[devfn];
1835 if (d) {
1836 fn(bus, d, opaque);
1837 }
1838 }
1839 }
1840
1841 void pci_for_each_device(PCIBus *bus, int bus_num,
1842 pci_bus_dev_fn fn, void *opaque)
1843 {
1844 bus = pci_find_bus_nr(bus, bus_num);
1845
1846 if (bus) {
1847 pci_for_each_device_under_bus(bus, fn, opaque);
1848 }
1849 }
1850
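/*
 * Look up a class description by exact class code.  When no entry matches,
 * the terminating { 0, NULL } element is returned, so callers must check
 * desc->desc before using it.
 */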
1851 const pci_class_desc *get_class_desc(int class)
1852 {
1853 const pci_class_desc *desc;
1854
1855 desc = pci_class_descriptions;
1856 while (desc->desc && class != desc->class) {
1857 desc++;
1858 }
1859
1860 return desc;
1861 }
1862
1863 void pci_init_nic_devices(PCIBus *bus, const char *default_model)
1864 {
1865 qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model,
1866 "virtio", "virtio-net-pci");
1867 }
1868
1869 bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model,
1870 const char *alias, const char *devaddr)
1871 {
1872 NICInfo *nd = qemu_find_nic_info(model, true, alias);
1873 int dom, busnr, devfn;
1874 PCIDevice *pci_dev;
1875 unsigned slot;
1876 PCIBus *bus;
1877
1878 if (!nd) {
1879 return false;
1880 }
1881
1882 if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) {
1883 error_report("Invalid PCI device address %s for device %s",
1884 devaddr, model);
1885 exit(1);
1886 }
1887
1888 if (dom != 0) {
1889 error_report("No support for non-zero PCI domains");
1890 exit(1);
1891 }
1892
1893 devfn = PCI_DEVFN(slot, 0);
1894
1895 bus = pci_find_bus_nr(rootbus, busnr);
1896 if (!bus) {
1897 error_report("Invalid PCI device address %s for device %s",
1898 devaddr, model);
1899 exit(1);
1900 }
1901
1902 pci_dev = pci_new(devfn, model);
1903 qdev_set_nic_properties(&pci_dev->qdev, nd);
1904 pci_realize_and_unref(pci_dev, bus, &error_fatal);
1905 return true;
1906 }
1907
1908 PCIDevice *pci_vga_init(PCIBus *bus)
1909 {
1910 vga_interface_created = true;
1911 switch (vga_interface_type) {
1912 case VGA_CIRRUS:
1913 return pci_create_simple(bus, -1, "cirrus-vga");
1914 case VGA_QXL:
1915 return pci_create_simple(bus, -1, "qxl-vga");
1916 case VGA_STD:
1917 return pci_create_simple(bus, -1, "VGA");
1918 case VGA_VMWARE:
1919 return pci_create_simple(bus, -1, "vmware-svga");
1920 case VGA_VIRTIO:
1921 return pci_create_simple(bus, -1, "virtio-vga");
1922 case VGA_NONE:
1923 default: /* Other non-PCI types. Checking for unsupported types is already
1924 done in vl.c. */
1925 return NULL;
1926 }
1927 }
1928
1929 /* Whether a given bus number is in range of the secondary
1930 * bus of the given bridge device. */
1931 static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
1932 {
1933 return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
1934 PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
1935 dev->config[PCI_SECONDARY_BUS] <= bus_num &&
1936 bus_num <= dev->config[PCI_SUBORDINATE_BUS];
1937 }
1938
1939 /* Whether a given bus number is in a range of a root bus */
1940 static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
1941 {
1942 int i;
1943
1944 for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
1945 PCIDevice *dev = bus->devices[i];
1946
1947 if (dev && IS_PCI_BRIDGE(dev)) {
1948 if (pci_secondary_bus_in_range(dev, bus_num)) {
1949 return true;
1950 }
1951 }
1952 }
1953
1954 return false;
1955 }
1956
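/*
 * Resolve a bus number to a PCIBus, starting from the given bus: return it
 * directly on an exact match, give up early if the number falls outside the
 * parent bridge's [secondary, subordinate] window, and otherwise descend
 * into the child bus whose range covers bus_num.
 */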
1957 PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
1958 {
1959 PCIBus *sec;
1960
1961 if (!bus) {
1962 return NULL;
1963 }
1964
1965 if (pci_bus_num(bus) == bus_num) {
1966 return bus;
1967 }
1968
1969 /* Consider all bus numbers in range for the host pci bridge. */
1970 if (!pci_bus_is_root(bus) &&
1971 !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
1972 return NULL;
1973 }
1974
1975 /* try child bus */
1976 for (; bus; bus = sec) {
1977 QLIST_FOREACH(sec, &bus->child, sibling) {
1978 if (pci_bus_num(sec) == bus_num) {
1979 return sec;
1980 }
1981 /* PXB buses assumed to be children of bus 0 */
1982 if (pci_bus_is_root(sec)) {
1983 if (pci_root_bus_in_range(sec, bus_num)) {
1984 break;
1985 }
1986 } else {
1987 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
1988 break;
1989 }
1990 }
1991 }
1992 }
1993
1994 return NULL;
1995 }
1996
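/*
 * Depth-first walk over a bus hierarchy: 'begin' is called before a bus'
 * children (and may return a new state that is passed down to them),
 * 'end' is called after all children have been visited.
 */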
1997 void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
1998 pci_bus_fn end, void *parent_state)
1999 {
2000 PCIBus *sec;
2001 void *state;
2002
2003 if (!bus) {
2004 return;
2005 }
2006
2007 if (begin) {
2008 state = begin(bus, parent_state);
2009 } else {
2010 state = parent_state;
2011 }
2012
2013 QLIST_FOREACH(sec, &bus->child, sibling) {
2014 pci_for_each_bus_depth_first(sec, begin, end, state);
2015 }
2016
2017 if (end) {
2018 end(bus, state);
2019 }
2020 }
2021
2022
2023 PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
2024 {
2025 bus = pci_find_bus_nr(bus, bus_num);
2026
2027 if (!bus)
2028 return NULL;
2029
2030 return bus->devices[devfn];
2031 }
2032
2033 #define ONBOARD_INDEX_MAX (16 * 1024 - 1)
2034
2035 static void pci_qdev_realize(DeviceState *qdev, Error **errp)
2036 {
2037 PCIDevice *pci_dev = (PCIDevice *)qdev;
2038 PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
2039 ObjectClass *klass = OBJECT_CLASS(pc);
2040 Error *local_err = NULL;
2041 bool is_default_rom;
2042 uint16_t class_id;
2043
2044 /*
2045 * capped by systemd (see: udev-builtin-net_id.c)
2046 * as it's the only known user, honor it to avoid users
2047 * misconfiguring QEMU and then wondering why acpi-index doesn't work
2048 */
2049 if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
2050 error_setg(errp, "acpi-index should be less or equal to %u",
2051 ONBOARD_INDEX_MAX);
2052 return;
2053 }
2054
2055 /*
2056 * make sure that acpi-index is unique across all present PCI devices
2057 */
2058 if (pci_dev->acpi_index) {
2059 GSequence *used_indexes = pci_acpi_index_list();
2060
2061 if (g_sequence_lookup(used_indexes,
2062 GINT_TO_POINTER(pci_dev->acpi_index),
2063 g_cmp_uint32, NULL)) {
2064 error_setg(errp, "a PCI device with acpi-index = %" PRIu32
2065 " already exist", pci_dev->acpi_index);
2066 return;
2067 }
2068 g_sequence_insert_sorted(used_indexes,
2069 GINT_TO_POINTER(pci_dev->acpi_index),
2070 g_cmp_uint32, NULL);
2071 }
2072
2073 if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) {
2074 error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
2075 return;
2076 }
2077
2078 /* Initialize cap_present for pci_is_express() and pci_config_size().
2079 * Note that hybrid PCI devices are not set automatically and need to
2080 * manage QEMU_PCI_CAP_EXPRESS manually. */
2081 if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
2082 !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
2083 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
2084 }
2085
2086 if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
2087 pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
2088 }
2089
2090 pci_dev = do_pci_register_device(pci_dev,
2091 object_get_typename(OBJECT(qdev)),
2092 pci_dev->devfn, errp);
2093 if (pci_dev == NULL)
2094 return;
2095
2096 if (pc->realize) {
2097 pc->realize(pci_dev, &local_err);
2098 if (local_err) {
2099 error_propagate(errp, local_err);
2100 do_pci_unregister_device(pci_dev);
2101 return;
2102 }
2103 }
2104
2105 /*
2106 * A PCIe Downstream Port that does not have ARI Forwarding enabled must
2107 * associate only Device 0 with the device attached to the bus
2108 * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
2109 * sec 7.3.1).
2110 * With ARI, PCI_SLOT() can return a non-zero value as the traditional
2111 * 5-bit Device Number and 3-bit Function Number fields in its associated
2112 * Routing IDs, Requester IDs and Completer IDs are interpreted as a
2113 * single 8-bit Function Number. Hence, ignore ARI capable devices.
2114 */
2115 if (pci_is_express(pci_dev) &&
2116 !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
2117 pcie_has_upstream_port(pci_dev) &&
2118 PCI_SLOT(pci_dev->devfn)) {
2119 warn_report("PCI: slot %d is not valid for %s,"
2120 " parent device only allows plugging into slot 0.",
2121 PCI_SLOT(pci_dev->devfn), pci_dev->name);
2122 }
2123
2124 if (pci_dev->failover_pair_id) {
2125 if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
2126 error_setg(errp, "failover primary device must be on "
2127 "PCIExpress bus");
2128 pci_qdev_unrealize(DEVICE(pci_dev));
2129 return;
2130 }
2131 class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
2132 if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
2133 error_setg(errp, "failover primary device is not an "
2134 "Ethernet device");
2135 pci_qdev_unrealize(DEVICE(pci_dev));
2136 return;
2137 }
2138 if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
2139 || (PCI_FUNC(pci_dev->devfn) != 0)) {
2140 error_setg(errp, "failover: primary device must be in its own "
2141 "PCI slot");
2142 pci_qdev_unrealize(DEVICE(pci_dev));
2143 return;
2144 }
2145 qdev->allow_unplug_during_migration = true;
2146 }
2147
2148 /* rom loading */
2149 is_default_rom = false;
2150 if (pci_dev->romfile == NULL && pc->romfile != NULL) {
2151 pci_dev->romfile = g_strdup(pc->romfile);
2152 is_default_rom = true;
2153 }
2154
2155 pci_add_option_rom(pci_dev, is_default_rom, &local_err);
2156 if (local_err) {
2157 error_propagate(errp, local_err);
2158 pci_qdev_unrealize(DEVICE(pci_dev));
2159 return;
2160 }
2161
2162 pci_set_power(pci_dev, true);
2163
2164 pci_dev->msi_trigger = pci_msi_trigger;
2165 }
2166
2167 static PCIDevice *pci_new_internal(int devfn, bool multifunction,
2168 const char *name)
2169 {
2170 DeviceState *dev;
2171
2172 dev = qdev_new(name);
2173 qdev_prop_set_int32(dev, "addr", devfn);
2174 qdev_prop_set_bit(dev, "multifunction", multifunction);
2175 return PCI_DEVICE(dev);
2176 }
2177
2178 PCIDevice *pci_new_multifunction(int devfn, const char *name)
2179 {
2180 return pci_new_internal(devfn, true, name);
2181 }
2182
2183 PCIDevice *pci_new(int devfn, const char *name)
2184 {
2185 return pci_new_internal(devfn, false, name);
2186 }
2187
2188 bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
2189 {
2190 return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
2191 }
2192
2193 PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
2194 const char *name)
2195 {
2196 PCIDevice *dev = pci_new_multifunction(devfn, name);
2197 pci_realize_and_unref(dev, bus, &error_fatal);
2198 return dev;
2199 }
2200
2201 PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
2202 {
2203 PCIDevice *dev = pci_new(devfn, name);
2204 pci_realize_and_unref(dev, bus, &error_fatal);
2205 return dev;
2206 }
2207
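/*
 * Scan configuration space above the standard header for a gap of 'size'
 * consecutive unused bytes and return its offset, or 0 if no such gap
 * exists.
 */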
2208 static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
2209 {
2210 int offset = PCI_CONFIG_HEADER_SIZE;
2211 int i;
2212 for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
2213 if (pdev->used[i])
2214 offset = i + 1;
2215 else if (i - offset + 1 == size)
2216 return offset;
2217 }
2218 return 0;
2219 }
2220
2221 static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
2222 uint8_t *prev_p)
2223 {
2224 uint8_t next, prev;
2225
2226 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
2227 return 0;
2228
2229 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
2230 prev = next + PCI_CAP_LIST_NEXT)
2231 if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
2232 break;
2233
2234 if (prev_p)
2235 *prev_p = prev;
2236 return next;
2237 }
2238
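/*
 * Return the offset of the closest capability at or below 'offset' when
 * that byte of config space is in use, or 0 otherwise.  pci_add_capability()
 * uses this to detect overlapping capabilities.
 */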
2239 static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
2240 {
2241 uint8_t next, prev, found = 0;
2242
2243 if (!(pdev->used[offset])) {
2244 return 0;
2245 }
2246
2247 assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);
2248
2249 for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
2250 prev = next + PCI_CAP_LIST_NEXT) {
2251 if (next <= offset && next > found) {
2252 found = next;
2253 }
2254 }
2255 return found;
2256 }
2257
2258 /* Patch the PCI vendor and device ids in a PCI rom image if necessary.
2259 This is needed for an option rom which is used for more than one device. */
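/* The byte at offset 6 acts as a checksum adjustment for etherboot-style
 * ROMs: adding the old ID bytes and subtracting the new ones keeps the
 * image's overall checksum unchanged after patching. */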
2260 static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
2261 {
2262 uint16_t vendor_id;
2263 uint16_t device_id;
2264 uint16_t rom_vendor_id;
2265 uint16_t rom_device_id;
2266 uint16_t rom_magic;
2267 uint16_t pcir_offset;
2268 uint8_t checksum;
2269
2270 /* Words in rom data are little endian (like in PCI configuration),
2271 so they can be read / written with pci_get_word / pci_set_word. */
2272
2273 /* Only a valid rom will be patched. */
2274 rom_magic = pci_get_word(ptr);
2275 if (rom_magic != 0xaa55) {
2276 PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic);
2277 return;
2278 }
2279 pcir_offset = pci_get_word(ptr + 0x18);
2280 if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
2281 PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset);
2282 return;
2283 }
2284
2285 vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2286 device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2287 rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
2288 rom_device_id = pci_get_word(ptr + pcir_offset + 6);
2289
2290 PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile,
2291 vendor_id, device_id, rom_vendor_id, rom_device_id);
2292
2293 checksum = ptr[6];
2294
2295 if (vendor_id != rom_vendor_id) {
2296 /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
2297 checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
2298 checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
2299 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
2300 ptr[6] = checksum;
2301 pci_set_word(ptr + pcir_offset + 4, vendor_id);
2302 }
2303
2304 if (device_id != rom_device_id) {
2305 /* Patch device id and checksum (at offset 6 for etherboot roms). */
2306 checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
2307 checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
2308 PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
2309 ptr[6] = checksum;
2310 pci_set_word(ptr + pcir_offset + 6, device_id);
2311 }
2312 }
2313
2314 /* Add an option rom for the device */
2315 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
2316 Error **errp)
2317 {
2318 int64_t size = 0;
2319 g_autofree char *path = NULL;
2320 char name[32];
2321 const VMStateDescription *vmsd;
2322
2323 /*
2324 * In case of incoming migration, the ROM will come with the migration
2325 * stream, so there is no reason to load the file. Nor do we want to fail
2326 * if the local ROM file mismatches the specified romsize.
2327 */
2328 bool load_file = !runstate_check(RUN_STATE_INMIGRATE);
2329
2330 if (!pdev->romfile || !strlen(pdev->romfile)) {
2331 return;
2332 }
2333
2334 if (!pdev->rom_bar) {
2335 /*
2336 * Load rom via fw_cfg instead of creating a rom bar,
2337 * for 0.11 compatibility.
2338 */
2339 int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);
2340
2341 /*
2342 * Hot-plugged devices can't use the option ROM
2343 * if the rom bar is disabled.
2344 */
2345 if (DEVICE(pdev)->hotplugged) {
2346 error_setg(errp, "Hot-plugged device without ROM bar"
2347 " can't have an option ROM");
2348 return;
2349 }
2350
2351 if (class == 0x0300) {
2352 rom_add_vga(pdev->romfile);
2353 } else {
2354 rom_add_option(pdev->romfile, -1);
2355 }
2356 return;
2357 }
2358
2359 if (load_file || pdev->romsize == UINT32_MAX) {
2360 path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
2361 if (path == NULL) {
2362 path = g_strdup(pdev->romfile);
2363 }
2364
2365 size = get_image_size(path);
2366 if (size < 0) {
2367 error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
2368 return;
2369 } else if (size == 0) {
2370 error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
2371 return;
2372 } else if (size > 2 * GiB) {
2373 error_setg(errp,
2374 "romfile \"%s\" too large (size cannot exceed 2 GiB)",
2375 pdev->romfile);
2376 return;
2377 }
2378 if (pdev->romsize != UINT32_MAX) {
2379 if (size > pdev->romsize) {
2380 error_setg(errp, "romfile \"%s\" (%u bytes) "
2381 "is too large for ROM size %u",
2382 pdev->romfile, (uint32_t)size, pdev->romsize);
2383 return;
2384 }
2385 } else {
2386 pdev->romsize = pow2ceil(size);
2387 }
2388 }
2389
2390 vmsd = qdev_get_vmsd(DEVICE(pdev));
2391 snprintf(name, sizeof(name), "%s.rom",
2392 vmsd ? vmsd->name : object_get_typename(OBJECT(pdev)));
2393
2394 pdev->has_rom = true;
2395 memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
2396 &error_fatal);
2397
2398 if (load_file) {
2399 void *ptr = memory_region_get_ram_ptr(&pdev->rom);
2400
2401 if (load_image_size(path, ptr, size) < 0) {
2402 error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
2403 return;
2404 }
2405
2406 if (is_default_rom) {
2407 /* Only the default rom images will be patched (if needed). */
2408 pci_patch_ids(pdev, ptr, size);
2409 }
2410 }
2411
2412 pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
2413 }
2414
2415 static void pci_del_option_rom(PCIDevice *pdev)
2416 {
2417 if (!pdev->has_rom)
2418 return;
2419
2420 vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
2421 pdev->has_rom = false;
2422 }
2423
2424 /*
2425 * On success, pci_add_capability() returns a positive value
2426 * that is the offset of the PCI capability.
2427 * On failure, it sets an error and returns a negative error
2428 * code.
2429 */
2430 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
2431 uint8_t offset, uint8_t size,
2432 Error **errp)
2433 {
2434 uint8_t *config;
2435 int i, overlapping_cap;
2436
2437 if (!offset) {
2438 offset = pci_find_space(pdev, size);
2439 /* out of PCI config space is programming error */
2440 assert(offset);
2441 } else {
2442 /* Verify that capabilities don't overlap. Note: device assignment
2443 * depends on this check to verify that the device is not broken.
2444 * Should never trigger for emulated devices, but it's helpful
2445 * for debugging these. */
2446 for (i = offset; i < offset + size; i++) {
2447 overlapping_cap = pci_find_capability_at_offset(pdev, i);
2448 if (overlapping_cap) {
2449 error_setg(errp, "%s:%02x:%02x.%x "
2450 "Attempt to add PCI capability %x at offset "
2451 "%x overlaps existing capability %x at offset %x",
2452 pci_root_bus_path(pdev), pci_dev_bus_num(pdev),
2453 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2454 cap_id, offset, overlapping_cap, i);
2455 return -EINVAL;
2456 }
2457 }
2458 }
2459
2460 config = pdev->config + offset;
2461 config[PCI_CAP_LIST_ID] = cap_id;
2462 config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
2463 pdev->config[PCI_CAPABILITY_LIST] = offset;
2464 pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
2465 memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
2466 /* Make capability read-only by default */
2467 memset(pdev->wmask + offset, 0, size);
2468 /* Check capability by default */
2469 memset(pdev->cmask + offset, 0xFF, size);
2470 return offset;
2471 }
2472
2473 /* Unlink capability from the pci config space. */
2474 void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
2475 {
2476 uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);
2477 if (!offset)
2478 return;
2479 pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
2480 /* Make capability writable again */
2481 memset(pdev->wmask + offset, 0xff, size);
2482 memset(pdev->w1cmask + offset, 0, size);
2483 /* Clear cmask as device-specific registers can't be checked */
2484 memset(pdev->cmask + offset, 0, size);
2485 memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));
2486
2487 if (!pdev->config[PCI_CAPABILITY_LIST])
2488 pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
2489 }
2490
2491 uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
2492 {
2493 return pci_find_capability_list(pdev, cap_id, NULL);
2494 }
2495
2496 static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
2497 {
2498 PCIDevice *d = (PCIDevice *)dev;
2499 const char *name = NULL;
2500 const pci_class_desc *desc = pci_class_descriptions;
2501 int class = pci_get_word(d->config + PCI_CLASS_DEVICE);
2502
2503 while (desc->desc &&
2504 (class & ~desc->fw_ign_bits) !=
2505 (desc->class & ~desc->fw_ign_bits)) {
2506 desc++;
2507 }
2508
2509 if (desc->desc) {
2510 name = desc->fw_name;
2511 }
2512
2513 if (name) {
2514 pstrcpy(buf, len, name);
2515 } else {
2516 snprintf(buf, len, "pci%04x,%04x",
2517 pci_get_word(d->config + PCI_VENDOR_ID),
2518 pci_get_word(d->config + PCI_DEVICE_ID));
2519 }
2520
2521 return buf;
2522 }
2523
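/*
 * Firmware device path component: "<fw_name>@<slot>", with ",<function>"
 * appended only when the function number is non-zero.
 */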
2524 static char *pcibus_get_fw_dev_path(DeviceState *dev)
2525 {
2526 PCIDevice *d = (PCIDevice *)dev;
2527 char name[33];
2528 int has_func = !!PCI_FUNC(d->devfn);
2529
2530 return g_strdup_printf("%s@%x%s%.*x",
2531 pci_dev_fw_name(dev, name, sizeof(name)),
2532 PCI_SLOT(d->devfn),
2533 has_func ? "," : "",
2534 has_func,
2535 PCI_FUNC(d->devfn));
2536 }
2537
2538 static char *pcibus_get_dev_path(DeviceState *dev)
2539 {
2540 PCIDevice *d = container_of(dev, PCIDevice, qdev);
2541 PCIDevice *t;
2542 int slot_depth;
2543 /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
2544 * 00 is added here to make this format compatible with
2545 * domain:Bus:Slot.Func for systems without nested PCI bridges.
2546 * Slot.Function list specifies the slot and function numbers for all
2547 * devices on the path from root to the specific device. */
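/* For example, assuming a root bus path of "0000:00", a function 0 device
 * in slot 3 behind a bridge at 00:01.0 yields "0000:00:01.0:03.0". */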
2548 const char *root_bus_path;
2549 int root_bus_len;
2550 char slot[] = ":SS.F";
2551 int slot_len = sizeof slot - 1 /* For '\0' */;
2552 int path_len;
2553 char *path, *p;
2554 int s;
2555
2556 root_bus_path = pci_root_bus_path(d);
2557 root_bus_len = strlen(root_bus_path);
2558
2559 /* Calculate # of slots on path between device and root. */
2560 slot_depth = 0;
2561 for (t = d; t; t = pci_get_bus(t)->parent_dev) {
2562 ++slot_depth;
2563 }
2564
2565 path_len = root_bus_len + slot_len * slot_depth;
2566
2567 /* Allocate memory, fill in the terminating null byte. */
2568 path = g_malloc(path_len + 1 /* For '\0' */);
2569 path[path_len] = '\0';
2570
2571 memcpy(path, root_bus_path, root_bus_len);
2572
2573 /* Fill in slot numbers. We walk up from device to root, so need to print
2574 * them in the reverse order, last to first. */
2575 p = path + path_len;
2576 for (t = d; t; t = pci_get_bus(t)->parent_dev) {
2577 p -= slot_len;
2578 s = snprintf(slot, sizeof slot, ":%02x.%x",
2579 PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
2580 assert(s == slot_len);
2581 memcpy(p, slot, slot_len);
2582 }
2583
2584 return path;
2585 }
2586
2587 static int pci_qdev_find_recursive(PCIBus *bus,
2588 const char *id, PCIDevice **pdev)
2589 {
2590 DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
2591 if (!qdev) {
2592 return -ENODEV;
2593 }
2594
2595 /* roughly check if the given qdev is a PCI device */
2596 if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
2597 *pdev = PCI_DEVICE(qdev);
2598 return 0;
2599 }
2600 return -EINVAL;
2601 }
2602
2603 int pci_qdev_find_device(const char *id, PCIDevice **pdev)
2604 {
2605 PCIHostState *host_bridge;
2606 int rc = -ENODEV;
2607
2608 QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
2609 int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
2610 if (!tmp) {
2611 rc = 0;
2612 break;
2613 }
2614 if (tmp != -ENODEV) {
2615 rc = tmp;
2616 }
2617 }
2618
2619 return rc;
2620 }
2621
2622 MemoryRegion *pci_address_space(PCIDevice *dev)
2623 {
2624 return pci_get_bus(dev)->address_space_mem;
2625 }
2626
2627 MemoryRegion *pci_address_space_io(PCIDevice *dev)
2628 {
2629 return pci_get_bus(dev)->address_space_io;
2630 }
2631
2632 static void pci_device_class_init(ObjectClass *klass, void *data)
2633 {
2634 DeviceClass *k = DEVICE_CLASS(klass);
2635
2636 k->realize = pci_qdev_realize;
2637 k->unrealize = pci_qdev_unrealize;
2638 k->bus_type = TYPE_PCI_BUS;
2639 device_class_set_props(k, pci_props);
2640 object_class_property_set_description(
2641 klass, "x-max-bounce-buffer-size",
2642 "Maximum buffer size allocated for bounce buffers used for mapped "
2643 "access to indirect DMA memory");
2644 }
2645
2646 static void pci_device_class_base_init(ObjectClass *klass, void *data)
2647 {
2648 if (!object_class_is_abstract(klass)) {
2649 ObjectClass *conventional =
2650 object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
2651 ObjectClass *pcie =
2652 object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
2653 ObjectClass *cxl =
2654 object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);
2655 assert(conventional || pcie || cxl);
2656 }
2657 }
2658
2659 /*
2660 * Get IOMMU root bus, aliased bus and devfn of a PCI device
2661 *
2662 * IOMMU root bus is needed by all call sites to call into iommu_ops.
2663 * For call sites which don't need aliased BDF, passing NULL to
2664 * aliased_[bus|devfn] is allowed.
2665 *
2666 * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device.
2667 *
2668 * @aliased_bus: return aliased #PCIBus of the PCI device, optional.
2669 *
2670 * @aliased_devfn: return aliased devfn of the PCI device, optional.
2671 */
2672 static void pci_device_get_iommu_bus_devfn(PCIDevice *dev,
2673 PCIBus **piommu_bus,
2674 PCIBus **aliased_bus,
2675 int *aliased_devfn)
2676 {
2677 PCIBus *bus = pci_get_bus(dev);
2678 PCIBus *iommu_bus = bus;
2679 int devfn = dev->devfn;
2680
2681 while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
2682 PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);
2683
2684 /*
2685 * The requester ID of the provided device may be aliased, as seen from
2686 * the IOMMU, due to topology limitations. The IOMMU relies on a
2687 * requester ID to provide a unique AddressSpace for devices, but
2688 * conventional PCI buses pre-date such concepts. Instead, the PCIe-
2689 * to-PCI bridge creates and accepts transactions on behalf of down-
2690 * stream devices. When doing so, all downstream devices are masked
2691 * (aliased) behind a single requester ID. The requester ID used
2692 * depends on the format of the bridge devices. Proper PCIe-to-PCI
2693 * bridges, with a PCIe capability indicating such, follow the
2694 * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
2695 * where the bridge uses the secondary bus as the bridge portion of the
2696 * requester ID and devfn of 00.0. For other bridges, typically those
2697 * found on the root complex such as the dmi-to-pci-bridge, we follow
2698 * the convention of typical bare-metal hardware, which uses the
2699 * requester ID of the bridge itself. There are device specific
2700 * exceptions to these rules, but these are the defaults that the
2701 * Linux kernel uses when determining DMA aliases itself and believed
2702 * to be true for the bare metal equivalents of the devices emulated
2703 * in QEMU.
2704 */
2705 if (!pci_bus_is_express(iommu_bus)) {
2706 PCIDevice *parent = iommu_bus->parent_dev;
2707
2708 if (pci_is_express(parent) &&
2709 pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
2710 devfn = PCI_DEVFN(0, 0);
2711 bus = iommu_bus;
2712 } else {
2713 devfn = parent->devfn;
2714 bus = parent_bus;
2715 }
2716 }
2717
2718 iommu_bus = parent_bus;
2719 }
2720
2721 assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
2722 assert(iommu_bus);
2723
2724 if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) {
2725 iommu_bus = NULL;
2726 }
2727
2728 *piommu_bus = iommu_bus;
2729
2730 if (aliased_bus) {
2731 *aliased_bus = bus;
2732 }
2733
2734 if (aliased_devfn) {
2735 *aliased_devfn = devfn;
2736 }
2737 }
2738
2739 AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
2740 {
2741 PCIBus *bus;
2742 PCIBus *iommu_bus;
2743 int devfn;
2744
2745 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
2746 if (iommu_bus) {
2747 return iommu_bus->iommu_ops->get_address_space(bus,
2748 iommu_bus->iommu_opaque, devfn);
2749 }
2750 return &address_space_memory;
2751 }
2752
2753 bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod,
2754 Error **errp)
2755 {
2756 PCIBus *iommu_bus, *aliased_bus;
2757 int aliased_devfn;
2758
2759 /* set_iommu_device requires device's direct BDF instead of aliased BDF */
2760 pci_device_get_iommu_bus_devfn(dev, &iommu_bus,
2761 &aliased_bus, &aliased_devfn);
2762 if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) {
2763 hiod->aliased_bus = aliased_bus;
2764 hiod->aliased_devfn = aliased_devfn;
2765 return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev),
2766 iommu_bus->iommu_opaque,
2767 dev->devfn, hiod, errp);
2768 }
2769 return true;
2770 }
2771
2772 void pci_device_unset_iommu_device(PCIDevice *dev)
2773 {
2774 PCIBus *iommu_bus;
2775
2776 pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL);
2777 if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) {
2778 return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev),
2779 iommu_bus->iommu_opaque,
2780 dev->devfn);
2781 }
2782 }
2783
2784 void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque)
2785 {
2786 /*
2787 * If called, pci_setup_iommu() should provide a minimum set of
2788 * useful callbacks for the bus.
2789 */
2790 assert(ops);
2791 assert(ops->get_address_space);
2792
2793 bus->iommu_ops = ops;
2794 bus->iommu_opaque = opaque;
2795 }
2796
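/*
 * Accumulate the address ranges a device may claim above 4 GiB: the
 * 64-bit prefetchable window of bridges and any mapped 64-bit memory
 * BARs.  Ranges are clamped to start at 4 GiB before being merged.
 */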
2797 static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
2798 {
2799 Range *range = opaque;
2800 uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
2801 int i;
2802
2803 if (!(cmd & PCI_COMMAND_MEMORY)) {
2804 return;
2805 }
2806
2807 if (IS_PCI_BRIDGE(dev)) {
2808 pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
2809 pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
2810
2811 base = MAX(base, 0x1ULL << 32);
2812
2813 if (limit >= base) {
2814 Range pref_range;
2815 range_set_bounds(&pref_range, base, limit);
2816 range_extend(range, &pref_range);
2817 }
2818 }
2819 for (i = 0; i < PCI_NUM_REGIONS; ++i) {
2820 PCIIORegion *r = &dev->io_regions[i];
2821 pcibus_t lob, upb;
2822 Range region_range;
2823
2824 if (!r->size ||
2825 (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
2826 !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
2827 continue;
2828 }
2829
2830 lob = pci_bar_address(dev, i, r->type, r->size);
2831 upb = lob + r->size - 1;
2832 if (lob == PCI_BAR_UNMAPPED) {
2833 continue;
2834 }
2835
2836 lob = MAX(lob, 0x1ULL << 32);
2837
2838 if (upb >= lob) {
2839 range_set_bounds(&region_range, lob, upb);
2840 range_extend(range, &region_range);
2841 }
2842 }
2843 }
2844
2845 void pci_bus_get_w64_range(PCIBus *bus, Range *range)
2846 {
2847 range_make_empty(range);
2848 pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
2849 }
2850
2851 static bool pcie_has_upstream_port(PCIDevice *dev)
2852 {
2853 PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));
2854
2855 /* Device associated with an upstream port.
2856 * As there are several types of these, it's easier to check the
2857 * parent device: upstream ports are always connected to
2858 * root or downstream ports.
2859 */
2860 return parent_dev &&
2861 pci_is_express(parent_dev) &&
2862 parent_dev->exp.exp_cap &&
2863 (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
2864 pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
2865 }
2866
2867 PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
2868 {
2869 PCIBus *bus = pci_get_bus(pci_dev);
2870
2871 if (pcie_has_upstream_port(pci_dev)) {
2872 /* With an upstream PCIe port, we only support 1 device at slot 0 */
2873 return bus->devices[0];
2874 } else {
2875 /* Other bus types might support multiple devices at slots 0-31 */
2876 return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
2877 }
2878 }
2879
2880 MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
2881 {
2882 MSIMessage msg;
2883 if (msix_enabled(dev)) {
2884 msg = msix_get_message(dev, vector);
2885 } else if (msi_enabled(dev)) {
2886 msg = msi_get_message(dev, vector);
2887 } else {
2888 /* Should never happen */
2889 error_report("%s: unknown interrupt type", __func__);
2890 abort();
2891 }
2892 return msg;
2893 }
2894
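/*
 * Toggling power re-evaluates BAR mappings and gates the bus master
 * region; powering the device off additionally resets it.
 */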
2895 void pci_set_power(PCIDevice *d, bool state)
2896 {
2897 if (d->has_power == state) {
2898 return;
2899 }
2900
2901 d->has_power = state;
2902 pci_update_mappings(d);
2903 memory_region_set_enabled(&d->bus_master_enable_region,
2904 (pci_get_word(d->config + PCI_COMMAND)
2905 & PCI_COMMAND_MASTER) && d->has_power);
2906 if (!d->has_power) {
2907 pci_device_reset(d);
2908 }
2909 }
2910
2911 static const TypeInfo pci_device_type_info = {
2912 .name = TYPE_PCI_DEVICE,
2913 .parent = TYPE_DEVICE,
2914 .instance_size = sizeof(PCIDevice),
2915 .abstract = true,
2916 .class_size = sizeof(PCIDeviceClass),
2917 .class_init = pci_device_class_init,
2918 .class_base_init = pci_device_class_base_init,
2919 };
2920
2921 static void pci_register_types(void)
2922 {
2923 type_register_static(&pci_bus_info);
2924 type_register_static(&pcie_bus_info);
2925 type_register_static(&cxl_bus_info);
2926 type_register_static(&conventional_pci_interface_info);
2927 type_register_static(&cxl_interface_info);
2928 type_register_static(&pcie_interface_info);
2929 type_register_static(&pci_device_type_info);
2930 }
2931
2932 type_init(pci_register_types)
2933