xref: /openbmc/qemu/hw/pci/pci.c (revision c3ae83117dfb198eae7f8afe8609e69674732cdb)
1 /*
2  * QEMU PCI bus manager
3  *
4  * Copyright (c) 2004 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "qemu/datadir.h"
27 #include "qemu/units.h"
28 #include "hw/irq.h"
29 #include "hw/pci/pci.h"
30 #include "hw/pci/pci_bridge.h"
31 #include "hw/pci/pci_bus.h"
32 #include "hw/pci/pci_host.h"
33 #include "hw/qdev-properties.h"
34 #include "hw/qdev-properties-system.h"
35 #include "migration/cpr.h"
36 #include "migration/qemu-file-types.h"
37 #include "migration/vmstate.h"
38 #include "net/net.h"
39 #include "system/numa.h"
40 #include "system/runstate.h"
41 #include "system/system.h"
42 #include "hw/loader.h"
43 #include "qemu/error-report.h"
44 #include "qemu/range.h"
45 #include "trace.h"
46 #include "hw/pci/msi.h"
47 #include "hw/pci/msix.h"
48 #include "hw/hotplug.h"
49 #include "hw/boards.h"
50 #include "hw/nvram/fw_cfg.h"
51 #include "qapi/error.h"
52 #include "qemu/cutils.h"
53 #include "pci-internal.h"
54 
55 #include "hw/xen/xen.h"
56 #include "hw/i386/kvm/xen_evtchn.h"
57 
58 bool pci_available = true;
59 
60 static char *pcibus_get_dev_path(DeviceState *dev);
61 static char *pcibus_get_fw_dev_path(DeviceState *dev);
62 static void pcibus_reset_hold(Object *obj, ResetType type);
63 static bool pcie_has_upstream_port(PCIDevice *dev);
64 
65 static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name,
66                              void *opaque, Error **errp)
67 {
68     uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj));
69 
70     visit_type_uint8(v, name, &busnr, errp);
71 }
72 
73 static const PropertyInfo prop_pci_busnr = {
74     .type = "busnr",
75     .get = prop_pci_busnr_get,
76 };
77 
78 static const Property pci_props[] = {
79     DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
80     DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
81     DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX),
82     DEFINE_PROP_INT32("rombar",  PCIDevice, rom_bar, -1),
83     DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
84                     QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
85     DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
86                     QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
87     DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
88                     QEMU_PCIE_EXTCAP_INIT_BITNR, true),
89     DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
90                        failover_pair_id),
91     DEFINE_PROP_UINT32("acpi-index",  PCIDevice, acpi_index, 0),
92     DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
93                     QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
94     DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
95                     QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
96     DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
97                      max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
98     DEFINE_PROP_STRING("sriov-pf", PCIDevice, sriov_pf),
99     DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present,
100                     QEMU_PCIE_EXT_TAG_BITNR, true),
101     { .name = "busnr", .info = &prop_pci_busnr },
102 };
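
/*
 * The generic properties above are inherited by every PCI device model.
 * As an illustration only (the device name is just an example, not tied to
 * this file), they are what make command lines such as the following work:
 *
 *     -device e1000,bus=pci.0,addr=05.0,multifunction=on,rombar=0
 */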
103 
104 static const VMStateDescription vmstate_pcibus = {
105     .name = "PCIBUS",
106     .version_id = 1,
107     .minimum_version_id = 1,
108     .fields = (const VMStateField[]) {
109         VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
110         VMSTATE_VARRAY_INT32(irq_count, PCIBus,
111                              nirq, 0, vmstate_info_int32,
112                              int32_t),
113         VMSTATE_END_OF_LIST()
114     }
115 };
116 
117 static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
118 {
119     return a - b;
120 }
121 
122 static GSequence *pci_acpi_index_list(void)
123 {
124     static GSequence *used_acpi_index_list;
125 
126     if (!used_acpi_index_list) {
127         used_acpi_index_list = g_sequence_new(NULL);
128     }
129     return used_acpi_index_list;
130 }
131 
132 static void pci_set_master(PCIDevice *d, bool enable)
133 {
134     memory_region_set_enabled(&d->bus_master_enable_region, enable);
135     d->is_master = enable; /* cache the status */
136 }
137 
138 static void pci_init_bus_master(PCIDevice *pci_dev)
139 {
140     AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);
141 
142     memory_region_init_alias(&pci_dev->bus_master_enable_region,
143                              OBJECT(pci_dev), "bus master",
144                              dma_as->root, 0, memory_region_size(dma_as->root));
145     pci_set_master(pci_dev, false);
146     memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
147                                 &pci_dev->bus_master_enable_region);
148 }
149 
150 static void pcibus_machine_done(Notifier *notifier, void *data)
151 {
152     PCIBus *bus = container_of(notifier, PCIBus, machine_done);
153     int i;
154 
155     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
156         if (bus->devices[i]) {
157             pci_init_bus_master(bus->devices[i]);
158         }
159     }
160 }
161 
162 static void pci_bus_realize(BusState *qbus, Error **errp)
163 {
164     PCIBus *bus = PCI_BUS(qbus);
165 
166     bus->machine_done.notify = pcibus_machine_done;
167     qemu_add_machine_init_done_notifier(&bus->machine_done);
168 
169     vmstate_register_any(NULL, &vmstate_pcibus, bus);
170 }
171 
172 static void pcie_bus_realize(BusState *qbus, Error **errp)
173 {
174     PCIBus *bus = PCI_BUS(qbus);
175     Error *local_err = NULL;
176 
177     pci_bus_realize(qbus, &local_err);
178     if (local_err) {
179         error_propagate(errp, local_err);
180         return;
181     }
182 
183     /*
184      * A PCIe bus can support extended config space if it is the root
185      * bus, or if the bus/bridge above it does as well.
186      */
187     if (pci_bus_is_root(bus)) {
188         bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
189     } else {
190         PCIBus *parent_bus = pci_get_bus(bus->parent_dev);
191 
192         if (pci_bus_allows_extended_config_space(parent_bus)) {
193             bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
194         }
195     }
196 }
197 
198 static void pci_bus_unrealize(BusState *qbus)
199 {
200     PCIBus *bus = PCI_BUS(qbus);
201 
202     qemu_remove_machine_init_done_notifier(&bus->machine_done);
203 
204     vmstate_unregister(NULL, &vmstate_pcibus, bus);
205 }
206 
207 static int pcibus_num(PCIBus *bus)
208 {
209     if (pci_bus_is_root(bus)) {
210         return 0; /* pci host bridge */
211     }
212     return bus->parent_dev->config[PCI_SECONDARY_BUS];
213 }
214 
215 static uint16_t pcibus_numa_node(PCIBus *bus)
216 {
217     return NUMA_NODE_UNASSIGNED;
218 }
219 
220 bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg,
221                                         PCIBus *bus,
222                                         Error **errp)
223 {
224     Object *obj;
225 
226     if (!bus) {
227         return true;
228     }
229     obj = OBJECT(bus);
230 
231     return fw_cfg_add_file_from_generator(fw_cfg, obj->parent,
232                                           object_get_canonical_path_component(obj),
233                                           "etc/extra-pci-roots", errp);
234 }
235 
236 static GByteArray *pci_bus_fw_cfg_gen_data(Object *obj, Error **errp)
237 {
238     PCIBus *bus = PCI_BUS(obj);
239     GByteArray *byte_array;
240     uint64_t extra_hosts = 0;
241 
242     if (!bus) {
243         return NULL;
244     }
245 
246     QLIST_FOREACH(bus, &bus->child, sibling) {
247         /* look for expander root buses */
248         if (pci_bus_is_root(bus)) {
249             extra_hosts++;
250         }
251     }
252 
253     if (!extra_hosts) {
254         return NULL;
255     }
256     extra_hosts = cpu_to_le64(extra_hosts);
257 
258     byte_array = g_byte_array_new();
259     g_byte_array_append(byte_array,
260                         (const void *)&extra_hosts, sizeof(extra_hosts));
261 
262     return byte_array;
263 }
264 
265 static void pci_bus_class_init(ObjectClass *klass, const void *data)
266 {
267     BusClass *k = BUS_CLASS(klass);
268     PCIBusClass *pbc = PCI_BUS_CLASS(klass);
269     ResettableClass *rc = RESETTABLE_CLASS(klass);
270     FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(klass);
271 
272     k->print_dev = pcibus_dev_print;
273     k->get_dev_path = pcibus_get_dev_path;
274     k->get_fw_dev_path = pcibus_get_fw_dev_path;
275     k->realize = pci_bus_realize;
276     k->unrealize = pci_bus_unrealize;
277 
278     rc->phases.hold = pcibus_reset_hold;
279 
280     pbc->bus_num = pcibus_num;
281     pbc->numa_node = pcibus_numa_node;
282 
283     fwgc->get_data = pci_bus_fw_cfg_gen_data;
284 }
285 
286 static const TypeInfo pci_bus_info = {
287     .name = TYPE_PCI_BUS,
288     .parent = TYPE_BUS,
289     .instance_size = sizeof(PCIBus),
290     .class_size = sizeof(PCIBusClass),
291     .class_init = pci_bus_class_init,
292     .interfaces = (const InterfaceInfo[]) {
293         { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE },
294         { }
295     }
296 };
297 
298 static const TypeInfo cxl_interface_info = {
299     .name          = INTERFACE_CXL_DEVICE,
300     .parent        = TYPE_INTERFACE,
301 };
302 
303 static const TypeInfo pcie_interface_info = {
304     .name          = INTERFACE_PCIE_DEVICE,
305     .parent        = TYPE_INTERFACE,
306 };
307 
308 static const TypeInfo conventional_pci_interface_info = {
309     .name          = INTERFACE_CONVENTIONAL_PCI_DEVICE,
310     .parent        = TYPE_INTERFACE,
311 };
312 
313 static void pcie_bus_class_init(ObjectClass *klass, const void *data)
314 {
315     BusClass *k = BUS_CLASS(klass);
316 
317     k->realize = pcie_bus_realize;
318 }
319 
320 static const TypeInfo pcie_bus_info = {
321     .name = TYPE_PCIE_BUS,
322     .parent = TYPE_PCI_BUS,
323     .class_init = pcie_bus_class_init,
324 };
325 
326 static const TypeInfo cxl_bus_info = {
327     .name       = TYPE_CXL_BUS,
328     .parent     = TYPE_PCIE_BUS,
329     .class_init = pcie_bus_class_init,
330 };
331 
332 static void pci_update_mappings(PCIDevice *d);
333 static void pci_irq_handler(void *opaque, int irq_num, int level);
334 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
335 static void pci_del_option_rom(PCIDevice *pdev);
336 
337 static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
338 static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;
339 
340 PCIHostStateList pci_host_bridges;
341 
342 int pci_bar(PCIDevice *d, int reg)
343 {
344     uint8_t type;
345 
346     /* PCIe virtual functions do not have their own BARs */
347     assert(!pci_is_vf(d));
348 
349     if (reg != PCI_ROM_SLOT)
350         return PCI_BASE_ADDRESS_0 + reg * 4;
351 
352     type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
353     return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
354 }
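
/*
 * Concretely (offsets from the standard config space layout): for a type 0
 * header, pci_bar(d, 0) is PCI_BASE_ADDRESS_0 (0x10) and
 * pci_bar(d, PCI_ROM_SLOT) is PCI_ROM_ADDRESS (0x30); for a type 1 (bridge)
 * header the expansion ROM BAR lives at PCI_ROM_ADDRESS1 (0x38).
 */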
355 
356 static inline int pci_irq_state(PCIDevice *d, int irq_num)
357 {
358         return (d->irq_state >> irq_num) & 0x1;
359 }
360 
361 static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
362 {
363         d->irq_state &= ~(0x1 << irq_num);
364         d->irq_state |= level << irq_num;
365 }
366 
367 static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
368 {
369     assert(irq_num >= 0);
370     assert(irq_num < bus->nirq);
371     bus->irq_count[irq_num] += change;
372     bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
373 }
374 
375 static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
376 {
377     PCIBus *bus;
378     for (;;) {
379         int dev_irq = irq_num;
380         bus = pci_get_bus(pci_dev);
381         assert(bus->map_irq);
382         irq_num = bus->map_irq(pci_dev, irq_num);
383         trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
384                             pci_bus_is_root(bus) ? "root-complex"
385                                     : DEVICE(bus->parent_dev)->canonical_path);
386         if (bus->set_irq)
387             break;
388         pci_dev = bus->parent_dev;
389     }
390     pci_bus_change_irq_level(bus, irq_num, change);
391 }
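
/*
 * The map_irq callback consulted above is usually the standard INTx swizzle
 * based on the device's slot.  A minimal sketch, using a hypothetical board
 * callback (QEMU also ships pci_swizzle_map_irq_fn() for exactly this
 * pattern):
 *
 *     static int example_map_irq(PCIDevice *pci_dev, int pin)
 *     {
 *         return (pin + PCI_SLOT(pci_dev->devfn)) % PCI_NUM_PINS;
 *     }
 */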
392 
393 int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
394 {
395     assert(irq_num >= 0);
396     assert(irq_num < bus->nirq);
397     return !!bus->irq_count[irq_num];
398 }
399 
400 /* Update interrupt status bit in config space on interrupt
401  * state change. */
402 static void pci_update_irq_status(PCIDevice *dev)
403 {
404     if (dev->irq_state) {
405         dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
406     } else {
407         dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
408     }
409 }
410 
411 void pci_device_deassert_intx(PCIDevice *dev)
412 {
413     int i;
414     for (i = 0; i < PCI_NUM_PINS; ++i) {
415         pci_irq_handler(dev, i, 0);
416     }
417 }
418 
419 static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
420 {
421     MemTxAttrs attrs = {};
422 
423     /*
424      * Xen uses the high bits of the address to contain some of the bits
425      * of the PIRQ#. Therefore we can't just send the write cycle and
426      * trust that it's caught by the APIC at 0xfee00000 because the
427  *      target of the write might be e.g. 0x1000fee46000 for PIRQ#4166.
428      * So we intercept the delivery here instead of in kvm_send_msi().
429      */
430     if (xen_mode == XEN_EMULATE &&
431         xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
432         return;
433     }
434     attrs.requester_id = pci_requester_id(dev);
435     address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
436                          attrs, NULL);
437 }
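
/*
 * Device models do not normally call this directly; they raise MSIs via
 * msi_notify()/msix_notify(), which build the MSIMessage from the
 * capability registers and end up invoking dev->msi_trigger.  A hedged
 * sketch of the usual call site in a device model:
 *
 *     if (msi_enabled(dev)) {
 *         msi_notify(dev, 0);
 *     }
 */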
438 
439 /*
440  * Register and track a PM capability.  If wmask is also enabled for the power
441  * state field of the pmcsr register, guest writes may change the device PM
442  * state.  BAR access is only enabled while the device is in the D0 state.
443  * Return the capability offset or negative error code.
444  */
445 int pci_pm_init(PCIDevice *d, uint8_t offset, Error **errp)
446 {
447     int cap = pci_add_capability(d, PCI_CAP_ID_PM, offset, PCI_PM_SIZEOF, errp);
448 
449     if (cap < 0) {
450         return cap;
451     }
452 
453     d->pm_cap = cap;
454     d->cap_present |= QEMU_PCI_CAP_PM;
455 
456     return cap;
457 }
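
/*
 * A minimal usage sketch from a hypothetical device realize function
 * ("mydev" is not part of this file).  Passing offset 0 lets the capability
 * be auto-placed; opening wmask afterwards makes the PM state
 * guest-writable, as described above:
 *
 *     static void mydev_realize(PCIDevice *pdev, Error **errp)
 *     {
 *         if (pci_pm_init(pdev, 0, errp) < 0) {
 *             return;
 *         }
 *         pci_set_word(pdev->wmask + pdev->pm_cap + PCI_PM_CTRL,
 *                      PCI_PM_CTRL_STATE_MASK);
 *     }
 */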
458 
459 static uint8_t pci_pm_state(PCIDevice *d)
460 {
461     uint16_t pmcsr;
462 
463     if (!(d->cap_present & QEMU_PCI_CAP_PM)) {
464         return 0;
465     }
466 
467     pmcsr = pci_get_word(d->config + d->pm_cap + PCI_PM_CTRL);
468 
469     return pmcsr & PCI_PM_CTRL_STATE_MASK;
470 }
471 
472 /*
473  * Update the PM capability state based on the new value stored in config
474  * space respective to the old, pre-write state provided.  If the new value
475  * is rejected (unsupported or invalid transition) restore the old value.
476  * Return the resulting PM state.
477  */
478 static uint8_t pci_pm_update(PCIDevice *d, uint32_t addr, int l, uint8_t old)
479 {
480     uint16_t pmc;
481     uint8_t new;
482 
483     if (!(d->cap_present & QEMU_PCI_CAP_PM) ||
484         !range_covers_byte(addr, l, d->pm_cap + PCI_PM_CTRL)) {
485         return old;
486     }
487 
488     new = pci_pm_state(d);
489     if (new == old) {
490         return old;
491     }
492 
493     pmc = pci_get_word(d->config + d->pm_cap + PCI_PM_PMC);
494 
495     /*
496      * Transitions to D1 & D2 are only allowed if supported.  Devices may
497      * only transition to higher D-states or to D0.
498      */
499     if ((!(pmc & PCI_PM_CAP_D1) && new == 1) ||
500         (!(pmc & PCI_PM_CAP_D2) && new == 2) ||
501         (old && new && new < old)) {
502         pci_word_test_and_clear_mask(d->config + d->pm_cap + PCI_PM_CTRL,
503                                      PCI_PM_CTRL_STATE_MASK);
504         pci_word_test_and_set_mask(d->config + d->pm_cap + PCI_PM_CTRL,
505                                    old);
506         trace_pci_pm_bad_transition(d->name, pci_dev_bus_num(d),
507                                     PCI_SLOT(d->devfn), PCI_FUNC(d->devfn),
508                                     old, new);
509         return old;
510     }
511 
512     trace_pci_pm_transition(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn),
513                             PCI_FUNC(d->devfn), old, new);
514     return new;
515 }
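
/*
 * Worked examples of the rules above: D0 -> D3hot (0 -> 3) and
 * D3hot -> D0 (3 -> 0) are accepted; D3hot -> D1 (3 -> 1) is rejected
 * because a device may only move from a non-D0 state to a higher D-state
 * or back to D0; D0 -> D1 is rejected unless the capability advertises
 * PCI_PM_CAP_D1.
 */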
516 
517 static void pci_reset_regions(PCIDevice *dev)
518 {
519     int r;
520     if (pci_is_vf(dev)) {
521         return;
522     }
523 
524     for (r = 0; r < PCI_NUM_REGIONS; ++r) {
525         PCIIORegion *region = &dev->io_regions[r];
526         if (!region->size) {
527             continue;
528         }
529 
530         if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
531             region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
532             pci_set_quad(dev->config + pci_bar(dev, r), region->type);
533         } else {
534             pci_set_long(dev->config + pci_bar(dev, r), region->type);
535         }
536     }
537 }
538 
539 static void pci_do_device_reset(PCIDevice *dev)
540 {
541     if ((dev->cap_present & QEMU_PCI_SKIP_RESET_ON_CPR) && cpr_is_incoming()) {
542         return;
543     }
544 
545     pci_device_deassert_intx(dev);
546     assert(dev->irq_state == 0);
547 
548     /* Clear all writable bits */
549     pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
550                                  pci_get_word(dev->wmask + PCI_COMMAND) |
551                                  pci_get_word(dev->w1cmask + PCI_COMMAND));
552     pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
553                                  pci_get_word(dev->wmask + PCI_STATUS) |
554                                  pci_get_word(dev->w1cmask + PCI_STATUS));
555     /* Some devices make bits of PCI_INTERRUPT_LINE read only */
556     pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
557                               pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
558                               pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
559     dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
560     /* Default PM state is D0 */
561     if (dev->cap_present & QEMU_PCI_CAP_PM) {
562         pci_word_test_and_clear_mask(dev->config + dev->pm_cap + PCI_PM_CTRL,
563                                      PCI_PM_CTRL_STATE_MASK);
564     }
565     pci_reset_regions(dev);
566     pci_update_mappings(dev);
567 
568     msi_reset(dev);
569     msix_reset(dev);
570     pcie_sriov_pf_reset(dev);
571 }
572 
573 /*
574  * This function is called on #RST and on FLR
575  * (FLR is triggered when PCI_EXP_DEVCTL_BCR_FLR is set).
576  */
577 void pci_device_reset(PCIDevice *dev)
578 {
579     device_cold_reset(&dev->qdev);
580     pci_do_device_reset(dev);
581 }
582 
583 /*
584  * Trigger a PCI bus reset under a given bus.
585  * Called via bus_cold_reset on RST# assert, after the devices
586  * have already been device_cold_reset-ed.
587  */
588 static void pcibus_reset_hold(Object *obj, ResetType type)
589 {
590     PCIBus *bus = PCI_BUS(obj);
591     int i;
592 
593     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
594         if (bus->devices[i]) {
595             pci_do_device_reset(bus->devices[i]);
596         }
597     }
598 
599     for (i = 0; i < bus->nirq; i++) {
600         assert(bus->irq_count[i] == 0);
601     }
602 }
603 
604 static void pci_host_bus_register(DeviceState *host)
605 {
606     PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
607 
608     QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
609 }
610 
611 static void pci_host_bus_unregister(DeviceState *host)
612 {
613     PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
614 
615     QLIST_REMOVE(host_bridge, next);
616 }
617 
618 PCIBus *pci_device_root_bus(const PCIDevice *d)
619 {
620     PCIBus *bus = pci_get_bus(d);
621 
622     while (!pci_bus_is_root(bus)) {
623         d = bus->parent_dev;
624         assert(d != NULL);
625 
626         bus = pci_get_bus(d);
627     }
628 
629     return bus;
630 }
631 
632 const char *pci_root_bus_path(PCIDevice *dev)
633 {
634     PCIBus *rootbus = pci_device_root_bus(dev);
635     PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
636     PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);
637 
638     assert(host_bridge->bus == rootbus);
639 
640     if (hc->root_bus_path) {
641         return (*hc->root_bus_path)(host_bridge, rootbus);
642     }
643 
644     return rootbus->qbus.name;
645 }
646 
647 bool pci_bus_bypass_iommu(PCIBus *bus)
648 {
649     PCIBus *rootbus = bus;
650     PCIHostState *host_bridge;
651 
652     if (!pci_bus_is_root(bus)) {
653         rootbus = pci_device_root_bus(bus->parent_dev);
654     }
655 
656     host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
657 
658     assert(host_bridge->bus == rootbus);
659 
660     return host_bridge->bypass_iommu;
661 }
662 
663 static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
664                                        MemoryRegion *mem, MemoryRegion *io,
665                                        uint8_t devfn_min)
666 {
667     assert(PCI_FUNC(devfn_min) == 0);
668     bus->devfn_min = devfn_min;
669     bus->slot_reserved_mask = 0x0;
670     bus->address_space_mem = mem;
671     bus->address_space_io = io;
672     bus->flags |= PCI_BUS_IS_ROOT;
673 
674     /* host bridge */
675     QLIST_INIT(&bus->child);
676 
677     pci_host_bus_register(parent);
678 }
679 
680 static void pci_bus_uninit(PCIBus *bus)
681 {
682     pci_host_bus_unregister(BUS(bus)->parent);
683 }
684 
685 bool pci_bus_is_express(const PCIBus *bus)
686 {
687     return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
688 }
689 
690 void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
691                        const char *name,
692                        MemoryRegion *mem, MemoryRegion *io,
693                        uint8_t devfn_min, const char *typename)
694 {
695     qbus_init(bus, bus_size, typename, parent, name);
696     pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
697 }
698 
699 PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
700                          MemoryRegion *mem, MemoryRegion *io,
701                          uint8_t devfn_min, const char *typename)
702 {
703     PCIBus *bus;
704 
705     bus = PCI_BUS(qbus_new(typename, parent, name));
706     pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
707     return bus;
708 }
709 
710 void pci_root_bus_cleanup(PCIBus *bus)
711 {
712     pci_bus_uninit(bus);
713     /* the caller of the unplug hotplug handler will delete this device */
714     qbus_unrealize(BUS(bus));
715 }
716 
717 void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
718                   void *irq_opaque, int nirq)
719 {
720     bus->set_irq = set_irq;
721     bus->irq_opaque = irq_opaque;
722     bus->nirq = nirq;
723     g_free(bus->irq_count);
724     bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
725 }
726 
727 void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
728 {
729     bus->map_irq = map_irq;
730 }
731 
732 void pci_bus_irqs_cleanup(PCIBus *bus)
733 {
734     bus->set_irq = NULL;
735     bus->map_irq = NULL;
736     bus->irq_opaque = NULL;
737     bus->nirq = 0;
738     g_free(bus->irq_count);
739     bus->irq_count = NULL;
740 }
741 
742 PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
743                               pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
744                               void *irq_opaque,
745                               MemoryRegion *mem, MemoryRegion *io,
746                               uint8_t devfn_min, int nirq,
747                               const char *typename)
748 {
749     PCIBus *bus;
750 
751     bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
752     pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
753     pci_bus_map_irqs(bus, map_irq);
754     return bus;
755 }
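
/*
 * A hedged sketch of how a host-bridge model typically wires this up; the
 * my_host_set_irq() helper, the "pic" array of four qemu_irq lines, the bus
 * name and the sysmem/sysio MemoryRegion pointers are illustrative
 * placeholders, not part of this file:
 *
 *     static void my_host_set_irq(void *opaque, int irq_num, int level)
 *     {
 *         qemu_irq *pic = opaque;
 *
 *         qemu_set_irq(pic[irq_num], level);
 *     }
 *
 *     bus = pci_register_root_bus(DEVICE(host), "pci.0",
 *                                 my_host_set_irq, pci_swizzle_map_irq_fn,
 *                                 pic, sysmem, sysio,
 *                                 PCI_DEVFN(0, 0), 4, TYPE_PCI_BUS);
 */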
756 
757 void pci_unregister_root_bus(PCIBus *bus)
758 {
759     pci_bus_irqs_cleanup(bus);
760     pci_root_bus_cleanup(bus);
761 }
762 
763 int pci_bus_num(PCIBus *s)
764 {
765     return PCI_BUS_GET_CLASS(s)->bus_num(s);
766 }
767 
768 /* Returns the min and max bus numbers of a PCI bus hierarchy */
769 void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
770 {
771     int i;
772     *min_bus = *max_bus = pci_bus_num(bus);
773 
774     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
775         PCIDevice *dev = bus->devices[i];
776 
777         if (dev && IS_PCI_BRIDGE(dev)) {
778             *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
779             *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
780         }
781     }
782 }
783 
784 int pci_bus_numa_node(PCIBus *bus)
785 {
786     return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
787 }
788 
789 static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
790                                  const VMStateField *field)
791 {
792     PCIDevice *s = container_of(pv, PCIDevice, config);
793     uint8_t *config;
794     int i;
795 
796     assert(size == pci_config_size(s));
797     config = g_malloc(size);
798 
799     qemu_get_buffer(f, config, size);
800     for (i = 0; i < size; ++i) {
801         if ((config[i] ^ s->config[i]) &
802             s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
803             error_report("%s: Bad config data: i=0x%x read: %x device: %x "
804                          "cmask: %x wmask: %x w1cmask:%x", __func__,
805                          i, config[i], s->config[i],
806                          s->cmask[i], s->wmask[i], s->w1cmask[i]);
807             g_free(config);
808             return -EINVAL;
809         }
810     }
811     memcpy(s->config, config, size);
812 
813     pci_update_mappings(s);
814     if (IS_PCI_BRIDGE(s)) {
815         pci_bridge_update_mappings(PCI_BRIDGE(s));
816     }
817 
818     pci_set_master(s, pci_get_word(s->config + PCI_COMMAND)
819                       & PCI_COMMAND_MASTER);
820 
821     g_free(config);
822     return 0;
823 }
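
/*
 * In the check above, cmask marks bits that must match the destination
 * device exactly, while wmask/w1cmask exclude guest-writable bits.  For
 * example, an incoming vendor ID that differs from the local one fails the
 * load (its cmask byte is 0xff and the field is read-only), whereas a
 * differing PCI_INTERRUPT_LINE byte is accepted because it is fully
 * guest-writable.
 */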
824 
825 /* just put buffer */
826 static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
827                                  const VMStateField *field, JSONWriter *vmdesc)
828 {
829     const uint8_t **v = pv;
830     assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
831     qemu_put_buffer(f, *v, size);
832 
833     return 0;
834 }
835 
836 static const VMStateInfo vmstate_info_pci_config = {
837     .name = "pci config",
838     .get  = get_pci_config_device,
839     .put  = put_pci_config_device,
840 };
841 
842 static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
843                              const VMStateField *field)
844 {
845     PCIDevice *s = container_of(pv, PCIDevice, irq_state);
846     uint32_t irq_state[PCI_NUM_PINS];
847     int i;
848     for (i = 0; i < PCI_NUM_PINS; ++i) {
849         irq_state[i] = qemu_get_be32(f);
850         if (irq_state[i] != 0x1 && irq_state[i] != 0) {
851             fprintf(stderr, "irq state %d: must be 0 or 1.\n",
852                     irq_state[i]);
853             return -EINVAL;
854         }
855     }
856 
857     for (i = 0; i < PCI_NUM_PINS; ++i) {
858         pci_set_irq_state(s, i, irq_state[i]);
859     }
860 
861     return 0;
862 }
863 
864 static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
865                              const VMStateField *field, JSONWriter *vmdesc)
866 {
867     int i;
868     PCIDevice *s = container_of(pv, PCIDevice, irq_state);
869 
870     for (i = 0; i < PCI_NUM_PINS; ++i) {
871         qemu_put_be32(f, pci_irq_state(s, i));
872     }
873 
874     return 0;
875 }
876 
877 static const VMStateInfo vmstate_info_pci_irq_state = {
878     .name = "pci irq state",
879     .get  = get_pci_irq_state,
880     .put  = put_pci_irq_state,
881 };
882 
883 static bool migrate_is_pcie(void *opaque, int version_id)
884 {
885     return pci_is_express((PCIDevice *)opaque);
886 }
887 
888 static bool migrate_is_not_pcie(void *opaque, int version_id)
889 {
890     return !pci_is_express((PCIDevice *)opaque);
891 }
892 
893 static int pci_post_load(void *opaque, int version_id)
894 {
895     pcie_sriov_pf_post_load(opaque);
896     return 0;
897 }
898 
899 const VMStateDescription vmstate_pci_device = {
900     .name = "PCIDevice",
901     .version_id = 2,
902     .minimum_version_id = 1,
903     .post_load = pci_post_load,
904     .fields = (const VMStateField[]) {
905         VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
906         VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
907                                    migrate_is_not_pcie,
908                                    0, vmstate_info_pci_config,
909                                    PCI_CONFIG_SPACE_SIZE),
910         VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
911                                    migrate_is_pcie,
912                                    0, vmstate_info_pci_config,
913                                    PCIE_CONFIG_SPACE_SIZE),
914         VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
915                                    vmstate_info_pci_irq_state,
916                                    PCI_NUM_PINS * sizeof(int32_t)),
917         VMSTATE_END_OF_LIST()
918     }
919 };
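
/*
 * Device models pull this description in through the VMSTATE_PCI_DEVICE()
 * macro rather than referencing it directly.  A minimal sketch for a
 * hypothetical "MyDevState" whose first member is the PCIDevice:
 *
 *     static const VMStateDescription vmstate_mydev = {
 *         .name = "mydev",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (const VMStateField[]) {
 *             VMSTATE_PCI_DEVICE(parent_obj, MyDevState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */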
920 
921 
922 void pci_device_save(PCIDevice *s, QEMUFile *f)
923 {
924     /* Clear interrupt status bit: it is implicit
925      * in irq_state which we are saving.
926      * This makes us compatible with old devices
927      * which never set or clear this bit. */
928     s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
929     vmstate_save_state(f, &vmstate_pci_device, s, NULL);
930     /* Restore the interrupt status bit. */
931     pci_update_irq_status(s);
932 }
933 
934 int pci_device_load(PCIDevice *s, QEMUFile *f)
935 {
936     int ret;
937     ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
938     /* Restore the interrupt status bit. */
939     pci_update_irq_status(s);
940     return ret;
941 }
942 
943 static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
944 {
945     pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
946                  pci_default_sub_vendor_id);
947     pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
948                  pci_default_sub_device_id);
949 }
950 
951 /*
952  * Parse [[<domain>:]<bus>:]<slot> when funcp == NULL, or
953  *       [[<domain>:]<bus>:]<slot>.<func> otherwise; return -1 on error.
954  */
955 static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
956                              unsigned int *slotp, unsigned int *funcp)
957 {
958     const char *p;
959     char *e;
960     unsigned long val;
961     unsigned long dom = 0, bus = 0;
962     unsigned int slot = 0;
963     unsigned int func = 0;
964 
965     p = addr;
966     val = strtoul(p, &e, 16);
967     if (e == p)
968         return -1;
969     if (*e == ':') {
970         bus = val;
971         p = e + 1;
972         val = strtoul(p, &e, 16);
973         if (e == p)
974             return -1;
975         if (*e == ':') {
976             dom = bus;
977             bus = val;
978             p = e + 1;
979             val = strtoul(p, &e, 16);
980             if (e == p)
981                 return -1;
982         }
983     }
984 
985     slot = val;
986 
987     if (funcp != NULL) {
988         if (*e != '.')
989             return -1;
990 
991         p = e + 1;
992         val = strtoul(p, &e, 16);
993         if (e == p)
994             return -1;
995 
996         func = val;
997     }
998 
999     /* if funcp == NULL func is 0 */
1000     if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
1001         return -1;
1002 
1003     if (*e)
1004         return -1;
1005 
1006     *domp = dom;
1007     *busp = bus;
1008     *slotp = slot;
1009     if (funcp != NULL)
1010         *funcp = func;
1011     return 0;
1012 }
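
/*
 * Examples of accepted input (values are hexadecimal): with funcp != NULL,
 * "0000:01:02.3" yields dom=0, bus=1, slot=2, func=3 and "3.0" yields
 * slot=3, func=0; with funcp == NULL, "01:02" yields bus=1, slot=2.
 */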
1013 
1014 static void pci_init_cmask(PCIDevice *dev)
1015 {
1016     pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
1017     pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
1018     dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
1019     dev->cmask[PCI_REVISION_ID] = 0xff;
1020     dev->cmask[PCI_CLASS_PROG] = 0xff;
1021     pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
1022     dev->cmask[PCI_HEADER_TYPE] = 0xff;
1023     dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
1024 }
1025 
1026 static void pci_init_wmask(PCIDevice *dev)
1027 {
1028     int config_size = pci_config_size(dev);
1029 
1030     dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
1031     dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
1032     pci_set_word(dev->wmask + PCI_COMMAND,
1033                  PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
1034                  PCI_COMMAND_INTX_DISABLE);
1035     pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
1036 
1037     memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
1038            config_size - PCI_CONFIG_HEADER_SIZE);
1039 }
1040 
1041 static void pci_init_w1cmask(PCIDevice *dev)
1042 {
1043     /*
1044      * Note: It's okay to set w1cmask even for readonly bits as
1045      * long as their value is hardwired to 0.
1046      */
1047     pci_set_word(dev->w1cmask + PCI_STATUS,
1048                  PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
1049                  PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
1050                  PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
1051 }
1052 
1053 static void pci_init_mask_bridge(PCIDevice *d)
1054 {
1055     /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
1056        PCI_SEC_LATENCY_TIMER */
1057     memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);
1058 
1059     /* base and limit */
1060     d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
1061     d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
1062     pci_set_word(d->wmask + PCI_MEMORY_BASE,
1063                  PCI_MEMORY_RANGE_MASK & 0xffff);
1064     pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
1065                  PCI_MEMORY_RANGE_MASK & 0xffff);
1066     pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
1067                  PCI_PREF_RANGE_MASK & 0xffff);
1068     pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
1069                  PCI_PREF_RANGE_MASK & 0xffff);
1070 
1071     /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
1072     memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);
1073 
1074     /* Supported memory and i/o types */
1075     d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
1076     d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
1077     pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
1078                                PCI_PREF_RANGE_TYPE_64);
1079     pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
1080                                PCI_PREF_RANGE_TYPE_64);
1081 
1082     /*
1083      * TODO: Bridges default to 10-bit VGA decoding but we currently only
1084      * implement 16-bit decoding (no alias support).
1085      */
1086     pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
1087                  PCI_BRIDGE_CTL_PARITY |
1088                  PCI_BRIDGE_CTL_SERR |
1089                  PCI_BRIDGE_CTL_ISA |
1090                  PCI_BRIDGE_CTL_VGA |
1091                  PCI_BRIDGE_CTL_VGA_16BIT |
1092                  PCI_BRIDGE_CTL_MASTER_ABORT |
1093                  PCI_BRIDGE_CTL_BUS_RESET |
1094                  PCI_BRIDGE_CTL_FAST_BACK |
1095                  PCI_BRIDGE_CTL_DISCARD |
1096                  PCI_BRIDGE_CTL_SEC_DISCARD |
1097                  PCI_BRIDGE_CTL_DISCARD_SERR);
1098     /* The below does nothing since we never set this bit; it is here for
1099      * completeness. */
1100     pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
1101                  PCI_BRIDGE_CTL_DISCARD_STATUS);
1102     d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
1103     d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
1104     pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
1105                                PCI_PREF_RANGE_TYPE_MASK);
1106     pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
1107                                PCI_PREF_RANGE_TYPE_MASK);
1108 }
1109 
1110 static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
1111 {
1112     uint8_t slot = PCI_SLOT(dev->devfn);
1113     uint8_t func;
1114 
1115     if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
1116         dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
1117     }
1118 
1119     /* SR/IOV is not handled here. */
1120     if (pci_is_vf(dev)) {
1121         return;
1122     }
1123 
1124     /*
1125      * The multifunction bit is interpreted in two ways:
1126      *   - all functions must set the bit to 1.
1127      *     Example: Intel X53
1128      *   - function 0 must set the bit, but the remaining functions (> 0)
1129      *     are allowed to leave the bit at 0.
1130      *     Example: PIIX3 (also in qemu), PIIX4 (also in qemu), ICH10
1131      *
1132      * So the OS (at least Linux) checks the bit only on function 0
1133      * and ignores it on functions > 0.
1134      *
1135      * The check below allows both interpretations.
1136      */
1137     if (PCI_FUNC(dev->devfn)) {
1138         PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
1139         if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
1140             /* function 0 should set multifunction bit */
1141             error_setg(errp, "PCI: single function device can't be populated "
1142                        "in function %x.%x", slot, PCI_FUNC(dev->devfn));
1143             return;
1144         }
1145         return;
1146     }
1147 
1148     if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
1149         return;
1150     }
1151     /* function 0 indicates single function, so function > 0 must be NULL */
1152     for (func = 1; func < PCI_FUNC_MAX; ++func) {
1153         PCIDevice *device = bus->devices[PCI_DEVFN(slot, func)];
1154         if (device && !pci_is_vf(device)) {
1155             error_setg(errp, "PCI: %x.0 indicates single function, "
1156                        "but %x.%x is already populated.",
1157                        slot, slot, func);
1158             return;
1159         }
1160     }
1161 }
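
/*
 * In practice the check above governs command-line usage such as the
 * following (device names are only examples): function 0 of a slot must
 * carry multifunction=on before the other functions can be populated:
 *
 *     -device e1000,bus=pci.0,addr=05.0,multifunction=on
 *     -device e1000,bus=pci.0,addr=05.1
 */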
1162 
1163 static void pci_config_alloc(PCIDevice *pci_dev)
1164 {
1165     int config_size = pci_config_size(pci_dev);
1166 
1167     pci_dev->config = g_malloc0(config_size);
1168     pci_dev->cmask = g_malloc0(config_size);
1169     pci_dev->wmask = g_malloc0(config_size);
1170     pci_dev->w1cmask = g_malloc0(config_size);
1171     pci_dev->used = g_malloc0(config_size);
1172 }
1173 
1174 static void pci_config_free(PCIDevice *pci_dev)
1175 {
1176     g_free(pci_dev->config);
1177     g_free(pci_dev->cmask);
1178     g_free(pci_dev->wmask);
1179     g_free(pci_dev->w1cmask);
1180     g_free(pci_dev->used);
1181 }
1182 
1183 static void do_pci_unregister_device(PCIDevice *pci_dev)
1184 {
1185     pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
1186     pci_config_free(pci_dev);
1187 
1188     if (xen_mode == XEN_EMULATE) {
1189         xen_evtchn_remove_pci_device(pci_dev);
1190     }
1191     if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
1192         memory_region_del_subregion(&pci_dev->bus_master_container_region,
1193                                     &pci_dev->bus_master_enable_region);
1194     }
1195     address_space_destroy(&pci_dev->bus_master_as);
1196 }
1197 
1198 /* Extract PCIReqIDCache into BDF format */
1199 static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
1200 {
1201     uint8_t bus_n;
1202     uint16_t result;
1203 
1204     switch (cache->type) {
1205     case PCI_REQ_ID_BDF:
1206         result = pci_get_bdf(cache->dev);
1207         break;
1208     case PCI_REQ_ID_SECONDARY_BUS:
1209         bus_n = pci_dev_bus_num(cache->dev);
1210         result = PCI_BUILD_BDF(bus_n, 0);
1211         break;
1212     default:
1213         error_report("Invalid PCI requester ID cache type: %d",
1214                      cache->type);
1215         exit(1);
1216         break;
1217     }
1218 
1219     return result;
1220 }
1221 
1222 /* Parse bridges up to the root complex and return the requester ID
1223  * cache for a specific device.  For a pure PCIe topology, the cached
1224  * result is exactly the same as the BDF of the device.
1225  * However, several tricks are required when the system mixes
1226  * legacy PCI devices and PCIe-to-PCI bridges.
1227  *
1228  * Here we cache the proxy device (and type) rather than the requester
1229  * ID, since bus numbers may change over time.
1230  */
1231 static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
1232 {
1233     PCIDevice *parent;
1234     PCIReqIDCache cache = {
1235         .dev = dev,
1236         .type = PCI_REQ_ID_BDF,
1237     };
1238 
1239     while (!pci_bus_is_root(pci_get_bus(dev))) {
1240         /* We are under PCI/PCIe bridges */
1241         parent = pci_get_bus(dev)->parent_dev;
1242         if (pci_is_express(parent)) {
1243             if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
1244                 /* When we pass through PCIe-to-PCI/PCIX bridges, we
1245                  * override the requester ID using secondary bus
1246                  * number of parent bridge with zeroed devfn
1247                  * (pcie-to-pci bridge spec chap 2.3). */
1248                 cache.type = PCI_REQ_ID_SECONDARY_BUS;
1249                 cache.dev = dev;
1250             }
1251         } else {
1252             /* Legacy PCI, override requester ID with the bridge's
1253              * BDF upstream.  When the root complex connects to
1254              * legacy PCI devices (including buses), it can only
1255              * obtain requester ID info from directly attached
1256              * devices.  If devices are attached under bridges, only
1257              * the requester ID of the bridge that is directly
1258              * attached to the root complex can be recognized. */
1259             cache.type = PCI_REQ_ID_BDF;
1260             cache.dev = parent;
1261         }
1262         dev = parent;
1263     }
1264 
1265     return cache;
1266 }
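
/*
 * Worked example of the caching above: for an endpoint sitting directly
 * below a PCIe-to-PCI bridge, the cache records the endpoint with type
 * PCI_REQ_ID_SECONDARY_BUS, so pci_requester_id() later returns
 * PCI_BUILD_BDF(n, 0) where n is the bridge's secondary bus number (the
 * bus the endpoint sits on).  For a device in a pure PCIe hierarchy the
 * cache simply records the device itself and its own BDF is returned.
 */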
1267 
1268 uint16_t pci_requester_id(PCIDevice *dev)
1269 {
1270     return pci_req_id_cache_extract(&dev->requester_id_cache);
1271 }
1272 
1273 static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
1274 {
1275     return !(bus->devices[devfn]);
1276 }
1277 
1278 static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
1279 {
1280     return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
1281 }
1282 
1283 uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
1284 {
1285     return bus->slot_reserved_mask;
1286 }
1287 
1288 void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
1289 {
1290     bus->slot_reserved_mask |= mask;
1291 }
1292 
1293 void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
1294 {
1295     bus->slot_reserved_mask &= ~mask;
1296 }
1297 
1298 /* -1 for devfn means auto assign */
1299 static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
1300                                          const char *name, int devfn,
1301                                          Error **errp)
1302 {
1303     PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
1304     PCIConfigReadFunc *config_read = pc->config_read;
1305     PCIConfigWriteFunc *config_write = pc->config_write;
1306     Error *local_err = NULL;
1307     DeviceState *dev = DEVICE(pci_dev);
1308     PCIBus *bus = pci_get_bus(pci_dev);
1309     bool is_bridge = IS_PCI_BRIDGE(pci_dev);
1310 
1311     /* Only pci bridges can be attached to extra PCI root buses */
1312     if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
1313         error_setg(errp,
1314                    "PCI: Only PCI/PCIe bridges can be plugged into %s",
1315                     bus->parent_dev->name);
1316         return NULL;
1317     }
1318 
1319     if (devfn < 0) {
1320         for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
1321             devfn += PCI_FUNC_MAX) {
1322             if (pci_bus_devfn_available(bus, devfn) &&
1323                    !pci_bus_devfn_reserved(bus, devfn)) {
1324                 goto found;
1325             }
1326         }
1327         error_setg(errp, "PCI: no slot/function available for %s, all in use "
1328                    "or reserved", name);
1329         return NULL;
1330     found: ;
1331     } else if (pci_bus_devfn_reserved(bus, devfn)) {
1332         error_setg(errp, "PCI: slot %d function %d not available for %s,"
1333                    " reserved",
1334                    PCI_SLOT(devfn), PCI_FUNC(devfn), name);
1335         return NULL;
1336     } else if (!pci_bus_devfn_available(bus, devfn)) {
1337         error_setg(errp, "PCI: slot %d function %d not available for %s,"
1338                    " in use by %s,id=%s",
1339                    PCI_SLOT(devfn), PCI_FUNC(devfn), name,
1340                    bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
1341         return NULL;
1342     }
1343 
1344     /*
1345      * Populating function 0 triggers a scan from the guest that
1346      * exposes other non-zero functions. Hence we need to ensure that
1347      * function 0 wasn't added yet.
1348      */
1349     if (dev->hotplugged && !pci_is_vf(pci_dev) &&
1350         pci_get_function_0(pci_dev)) {
1351         error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
1352                    " new func %s cannot be exposed to guest.",
1353                    PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
1354                    pci_get_function_0(pci_dev)->name,
1355                    name);
1356 
1357        return NULL;
1358     }
1359 
1360     pci_dev->devfn = devfn;
1361     pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
1362     pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);
1363 
1364     memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
1365                        "bus master container", UINT64_MAX);
1366     address_space_init(&pci_dev->bus_master_as,
1367                        &pci_dev->bus_master_container_region, pci_dev->name);
1368     pci_dev->bus_master_as.max_bounce_buffer_size =
1369         pci_dev->max_bounce_buffer_size;
1370 
1371     if (phase_check(PHASE_MACHINE_READY)) {
1372         pci_init_bus_master(pci_dev);
1373     }
1374     pci_dev->irq_state = 0;
1375     pci_config_alloc(pci_dev);
1376 
1377     pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
1378     pci_config_set_device_id(pci_dev->config, pc->device_id);
1379     pci_config_set_revision(pci_dev->config, pc->revision);
1380     pci_config_set_class(pci_dev->config, pc->class_id);
1381 
1382     if (!is_bridge) {
1383         if (pc->subsystem_vendor_id || pc->subsystem_id) {
1384             pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
1385                          pc->subsystem_vendor_id);
1386             pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
1387                          pc->subsystem_id);
1388         } else {
1389             pci_set_default_subsystem_id(pci_dev);
1390         }
1391     } else {
1392         /* subsystem_vendor_id/subsystem_id are only for header type 0 */
1393         assert(!pc->subsystem_vendor_id);
1394         assert(!pc->subsystem_id);
1395     }
1396     pci_init_cmask(pci_dev);
1397     pci_init_wmask(pci_dev);
1398     pci_init_w1cmask(pci_dev);
1399     if (is_bridge) {
1400         pci_init_mask_bridge(pci_dev);
1401     }
1402     pci_init_multifunction(bus, pci_dev, &local_err);
1403     if (local_err) {
1404         error_propagate(errp, local_err);
1405         do_pci_unregister_device(pci_dev);
1406         return NULL;
1407     }
1408 
1409     if (!config_read)
1410         config_read = pci_default_read_config;
1411     if (!config_write)
1412         config_write = pci_default_write_config;
1413     pci_dev->config_read = config_read;
1414     pci_dev->config_write = config_write;
1415     bus->devices[devfn] = pci_dev;
1416     pci_dev->version_id = 2; /* Current pci device vmstate version */
1417     return pci_dev;
1418 }
1419 
1420 static void pci_unregister_io_regions(PCIDevice *pci_dev)
1421 {
1422     PCIIORegion *r;
1423     int i;
1424 
1425     for(i = 0; i < PCI_NUM_REGIONS; i++) {
1426         r = &pci_dev->io_regions[i];
1427         if (!r->size || r->addr == PCI_BAR_UNMAPPED)
1428             continue;
1429         memory_region_del_subregion(r->address_space, r->memory);
1430     }
1431 
1432     pci_unregister_vga(pci_dev);
1433 }
1434 
1435 static void pci_qdev_unrealize(DeviceState *dev)
1436 {
1437     PCIDevice *pci_dev = PCI_DEVICE(dev);
1438     PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
1439 
1440     pci_unregister_io_regions(pci_dev);
1441     pci_del_option_rom(pci_dev);
1442     pcie_sriov_unregister_device(pci_dev);
1443 
1444     if (pc->exit) {
1445         pc->exit(pci_dev);
1446     }
1447 
1448     pci_device_deassert_intx(pci_dev);
1449     do_pci_unregister_device(pci_dev);
1450 
1451     pci_dev->msi_trigger = NULL;
1452 
1453     /*
1454      * clean up acpi-index so it can be reused by another device
1455      */
1456     if (pci_dev->acpi_index) {
1457         GSequence *used_indexes = pci_acpi_index_list();
1458 
1459         g_sequence_remove(g_sequence_lookup(used_indexes,
1460                           GINT_TO_POINTER(pci_dev->acpi_index),
1461                           g_cmp_uint32, NULL));
1462     }
1463 }
1464 
1465 void pci_register_bar(PCIDevice *pci_dev, int region_num,
1466                       uint8_t type, MemoryRegion *memory)
1467 {
1468     PCIIORegion *r;
1469     uint32_t addr; /* offset in pci config space */
1470     uint64_t wmask;
1471     pcibus_t size = memory_region_size(memory);
1472     uint8_t hdr_type;
1473 
1474     assert(region_num >= 0);
1475     assert(region_num < PCI_NUM_REGIONS);
1476     assert(is_power_of_2(size));
1477 
1478     /* A PCI bridge device (with Type 1 header) may have at most 2 BARs */
1479     hdr_type =
1480         pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
1481     assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);
1482 
1483     r = &pci_dev->io_regions[region_num];
1484     assert(!r->size);
1485     r->size = size;
1486     r->type = type;
1487     r->memory = memory;
1488     r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
1489                         ? pci_get_bus(pci_dev)->address_space_io
1490                         : pci_get_bus(pci_dev)->address_space_mem;
1491 
1492     if (pci_is_vf(pci_dev)) {
1493         r->addr = pci_bar_address(pci_dev, region_num, r->type, r->size);
1494         if (r->addr != PCI_BAR_UNMAPPED) {
1495             memory_region_add_subregion_overlap(r->address_space,
1496                                                 r->addr, r->memory, 1);
1497         }
1498     } else {
1499         r->addr = PCI_BAR_UNMAPPED;
1500 
1501         wmask = ~(size - 1);
1502         if (region_num == PCI_ROM_SLOT) {
1503             /* ROM enable bit is writable */
1504             wmask |= PCI_ROM_ADDRESS_ENABLE;
1505         }
1506 
1507         addr = pci_bar(pci_dev, region_num);
1508         pci_set_long(pci_dev->config + addr, type);
1509 
1510         if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
1511             r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1512             pci_set_quad(pci_dev->wmask + addr, wmask);
1513             pci_set_quad(pci_dev->cmask + addr, ~0ULL);
1514         } else {
1515             pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
1516             pci_set_long(pci_dev->cmask + addr, 0xffffffff);
1517         }
1518     }
1519 }
1520 
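/*
 * Illustrative sketch, not part of this file: a typical device realize
 * function registers a 64-bit prefetchable memory BAR roughly like this.
 * The MyDevState/mydev names and the 16 KiB size are hypothetical.
 */
#if 0
static void mydev_realize(PCIDevice *pci_dev, Error **errp)
{
    MyDevState *s = MYDEV(pci_dev);

    memory_region_init_io(&s->mmio, OBJECT(pci_dev), &mydev_mmio_ops, s,
                          "mydev-mmio", 16 * KiB);
    pci_register_bar(pci_dev, 0,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH,
                     &s->mmio);
}
#endif
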
1521 static void pci_update_vga(PCIDevice *pci_dev)
1522 {
1523     uint16_t cmd;
1524 
1525     if (!pci_dev->has_vga) {
1526         return;
1527     }
1528 
1529     cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
1530 
1531     memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
1532                               cmd & PCI_COMMAND_MEMORY);
1533     memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
1534                               cmd & PCI_COMMAND_IO);
1535     memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
1536                               cmd & PCI_COMMAND_IO);
1537 }
1538 
1539 void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
1540                       MemoryRegion *io_lo, MemoryRegion *io_hi)
1541 {
1542     PCIBus *bus = pci_get_bus(pci_dev);
1543 
1544     assert(!pci_dev->has_vga);
1545 
1546     assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
1547     pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
1548     memory_region_add_subregion_overlap(bus->address_space_mem,
1549                                         QEMU_PCI_VGA_MEM_BASE, mem, 1);
1550 
1551     assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
1552     pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
1553     memory_region_add_subregion_overlap(bus->address_space_io,
1554                                         QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);
1555 
1556     assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
1557     pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
1558     memory_region_add_subregion_overlap(bus->address_space_io,
1559                                         QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
1560     pci_dev->has_vga = true;
1561 
1562     pci_update_vga(pci_dev);
1563 }
1564 
1565 void pci_unregister_vga(PCIDevice *pci_dev)
1566 {
1567     PCIBus *bus = pci_get_bus(pci_dev);
1568 
1569     if (!pci_dev->has_vga) {
1570         return;
1571     }
1572 
1573     memory_region_del_subregion(bus->address_space_mem,
1574                                 pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
1575     memory_region_del_subregion(bus->address_space_io,
1576                                 pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
1577     memory_region_del_subregion(bus->address_space_io,
1578                                 pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
1579     pci_dev->has_vga = false;
1580 }
1581 
1582 pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
1583 {
1584     return pci_dev->io_regions[region_num].addr;
1585 }
1586 
1587 static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
1588                                         uint8_t type, pcibus_t size)
1589 {
1590     pcibus_t new_addr;
1591     if (!pci_is_vf(d)) {
1592         int bar = pci_bar(d, reg);
1593         if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1594             new_addr = pci_get_quad(d->config + bar);
1595         } else {
1596             new_addr = pci_get_long(d->config + bar);
1597         }
1598     } else {
1599         PCIDevice *pf = d->exp.sriov_vf.pf;
1600         uint16_t sriov_cap = pf->exp.sriov_cap;
1601         int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
1602         uint16_t vf_offset =
1603             pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
1604         uint16_t vf_stride =
1605             pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
1606         uint32_t vf_num = d->devfn - (pf->devfn + vf_offset);
1607 
1608         if (vf_num) {
1609             vf_num /= vf_stride;
1610         }
1611 
1612         if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1613             new_addr = pci_get_quad(pf->config + bar);
1614         } else {
1615             new_addr = pci_get_long(pf->config + bar);
1616         }
1617         new_addr += vf_num * size;
1618     }
1619     /* The ROM slot has a specific enable bit, keep it intact */
1620     if (reg != PCI_ROM_SLOT) {
1621         new_addr &= ~(size - 1);
1622     }
1623     return new_addr;
1624 }
1625 
1626 pcibus_t pci_bar_address(PCIDevice *d,
1627                          int reg, uint8_t type, pcibus_t size)
1628 {
1629     pcibus_t new_addr, last_addr;
1630     uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
1631     MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
1632     bool allow_0_address = mc->pci_allow_0_address;
1633 
1634     if (type & PCI_BASE_ADDRESS_SPACE_IO) {
1635         if (!(cmd & PCI_COMMAND_IO)) {
1636             return PCI_BAR_UNMAPPED;
1637         }
1638         new_addr = pci_config_get_bar_addr(d, reg, type, size);
1639         last_addr = new_addr + size - 1;
1640         /* Explicitly check whether the 32-bit BAR wraps around.
1641          * TODO: make priorities correct and remove this workaround.
1642          */
1643         if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
1644             (!allow_0_address && new_addr == 0)) {
1645             return PCI_BAR_UNMAPPED;
1646         }
1647         return new_addr;
1648     }
1649 
1650     if (!(cmd & PCI_COMMAND_MEMORY)) {
1651         return PCI_BAR_UNMAPPED;
1652     }
1653     new_addr = pci_config_get_bar_addr(d, reg, type, size);
1654     /* the ROM slot has a specific enable bit */
1655     if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
1656         return PCI_BAR_UNMAPPED;
1657     }
1658     new_addr &= ~(size - 1);
1659     last_addr = new_addr + size - 1;
1660     /* NOTE: we do not support wrapping */
1661     /* XXX: as we cannot support really dynamic
1662        mappings, we handle specific values as invalid
1663        mappings. */
1664     if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
1665         (!allow_0_address && new_addr == 0)) {
1666         return PCI_BAR_UNMAPPED;
1667     }
1668 
1669     /* pcibus_t is now 64 bit.
1670      * Explicitly check whether the 32-bit BAR wraps around.
1671      * Without this, PC IDE doesn't work well.
1672      * TODO: remove this workaround.
1673      */
1674     if  (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
1675         return PCI_BAR_UNMAPPED;
1676     }
1677 
1678     /*
1679      * OS is allowed to set BAR beyond its addressable
1680      * bits. For example, 32 bit OS can set 64bit bar
1681      * to >4G. Check it. TODO: we might need to support
1682      * it in the future for e.g. PAE.
1683      */
1684     if (last_addr >= HWADDR_MAX) {
1685         return PCI_BAR_UNMAPPED;
1686     }
1687 
1688     return new_addr;
1689 }
1690 
1691 static void pci_update_mappings(PCIDevice *d)
1692 {
1693     PCIIORegion *r;
1694     int i;
1695     pcibus_t new_addr;
1696 
1697     for(i = 0; i < PCI_NUM_REGIONS; i++) {
1698         r = &d->io_regions[i];
1699 
1700         /* this region isn't registered */
1701         if (!r->size)
1702             continue;
1703 
1704         new_addr = pci_bar_address(d, i, r->type, r->size);
1705         if (!d->enabled || pci_pm_state(d)) {
1706             new_addr = PCI_BAR_UNMAPPED;
1707         }
1708 
1709         /* This BAR hasn't changed */
1710         if (new_addr == r->addr)
1711             continue;
1712 
1713         /* now do the real mapping */
1714         if (r->addr != PCI_BAR_UNMAPPED) {
1715             trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
1716                                           PCI_SLOT(d->devfn),
1717                                           PCI_FUNC(d->devfn),
1718                                           i, r->addr, r->size);
1719             memory_region_del_subregion(r->address_space, r->memory);
1720         }
1721         r->addr = new_addr;
1722         if (r->addr != PCI_BAR_UNMAPPED) {
1723             trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
1724                                           PCI_SLOT(d->devfn),
1725                                           PCI_FUNC(d->devfn),
1726                                           i, r->addr, r->size);
1727             memory_region_add_subregion_overlap(r->address_space,
1728                                                 r->addr, r->memory, 1);
1729         }
1730     }
1731 
1732     pci_update_vga(d);
1733 }
1734 
1735 int pci_irq_disabled(PCIDevice *d)
1736 {
1737     return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
1738 }
1739 
1740 /* Called after the interrupt disable field in config space has been
1741  * updated; assert/deassert interrupts if necessary.
1742  * Takes the original interrupt disable bit value (before the update). */
1743 static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
1744 {
1745     int i, disabled = pci_irq_disabled(d);
1746     if (disabled == was_irq_disabled)
1747         return;
1748     for (i = 0; i < PCI_NUM_PINS; ++i) {
1749         int state = pci_irq_state(d, i);
1750         pci_change_irq_level(d, i, disabled ? -state : state);
1751     }
1752 }
1753 
1754 uint32_t pci_default_read_config(PCIDevice *d,
1755                                  uint32_t address, int len)
1756 {
1757     uint32_t val = 0;
1758 
1759     assert(address + len <= pci_config_size(d));
1760 
1761     if (pci_is_express_downstream_port(d) &&
1762         ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
1763         pcie_sync_bridge_lnk(d);
1764     }
1765     memcpy(&val, d->config + address, len);
1766     return le32_to_cpu(val);
1767 }
1768 
1769 void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
1770 {
1771     uint8_t new_pm_state, old_pm_state = pci_pm_state(d);
1772     int i, was_irq_disabled = pci_irq_disabled(d);
1773     uint32_t val = val_in;
1774 
1775     assert(addr + l <= pci_config_size(d));
1776 
1777     for (i = 0; i < l; val >>= 8, ++i) {
1778         uint8_t wmask = d->wmask[addr + i];
1779         uint8_t w1cmask = d->w1cmask[addr + i];
1780         assert(!(wmask & w1cmask));
1781         d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
1782         d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
1783     }
1784 
1785     new_pm_state = pci_pm_update(d, addr, l, old_pm_state);
1786 
1787     if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
1788         ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
1789         ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
1790         range_covers_byte(addr, l, PCI_COMMAND) ||
1791         !!new_pm_state != !!old_pm_state) {
1792         pci_update_mappings(d);
1793     }
1794 
1795     if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
1796         pci_update_irq_disabled(d, was_irq_disabled);
1797         pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND) &
1798                           PCI_COMMAND_MASTER) && d->enabled);
1799     }
1800 
1801     msi_write_config(d, addr, val_in, l);
1802     msix_write_config(d, addr, val_in, l);
1803     pcie_sriov_config_write(d, addr, val_in, l);
1804 }
1805 
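/*
 * Illustrative sketch, not part of this file: how pci_default_write_config()
 * above combines a single configuration byte with its writable mask (wmask)
 * and write-1-to-clear mask (w1cmask).
 */
#if 0
static uint8_t cfg_byte_after_write(uint8_t cur, uint8_t val,
                                    uint8_t wmask, uint8_t w1cmask)
{
    uint8_t next = (cur & ~wmask) | (val & wmask); /* plain writable bits */

    next &= ~(val & w1cmask);                      /* W1C bits clear on 1 */
    return next;
}
#endif
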
1806 /***********************************************************/
1807 /* generic PCI irq support */
1808 
1809 /* 0 <= irq_num <= 3. level must be 0 or 1 */
1810 static void pci_irq_handler(void *opaque, int irq_num, int level)
1811 {
1812     PCIDevice *pci_dev = opaque;
1813     int change;
1814 
1815     assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
1816     assert(level == 0 || level == 1);
1817     change = level - pci_irq_state(pci_dev, irq_num);
1818     if (!change)
1819         return;
1820 
1821     pci_set_irq_state(pci_dev, irq_num, level);
1822     pci_update_irq_status(pci_dev);
1823     if (pci_irq_disabled(pci_dev))
1824         return;
1825     pci_change_irq_level(pci_dev, irq_num, change);
1826 }
1827 
1828 qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
1829 {
1830     int intx = pci_intx(pci_dev);
1831     assert(0 <= intx && intx < PCI_NUM_PINS);
1832 
1833     return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
1834 }
1835 
1836 void pci_set_irq(PCIDevice *pci_dev, int level)
1837 {
1838     int intx = pci_intx(pci_dev);
1839     pci_irq_handler(pci_dev, intx, level);
1840 }
1841 
1842 /* Special hooks used by device assignment */
1843 void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
1844 {
1845     assert(pci_bus_is_root(bus));
1846     bus->route_intx_to_irq = route_intx_to_irq;
1847 }
1848 
1849 PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
1850 {
1851     PCIBus *bus;
1852 
1853     do {
1854         int dev_irq = pin;
1855         bus = pci_get_bus(dev);
1856         pin = bus->map_irq(dev, pin);
1857         trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
1858                             pci_bus_is_root(bus) ? "root-complex"
1859                                     : DEVICE(bus->parent_dev)->canonical_path);
1860         dev = bus->parent_dev;
1861     } while (dev);
1862 
1863     if (!bus->route_intx_to_irq) {
1864         error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
1865                      object_get_typename(OBJECT(bus->qbus.parent)));
1866         return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
1867     }
1868 
1869     return bus->route_intx_to_irq(bus->irq_opaque, pin);
1870 }
1871 
1872 bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
1873 {
1874     return old->mode != new->mode || old->irq != new->irq;
1875 }
1876 
1877 void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
1878 {
1879     PCIDevice *dev;
1880     PCIBus *sec;
1881     int i;
1882 
1883     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
1884         dev = bus->devices[i];
1885         if (dev && dev->intx_routing_notifier) {
1886             dev->intx_routing_notifier(dev);
1887         }
1888     }
1889 
1890     QLIST_FOREACH(sec, &bus->child, sibling) {
1891         pci_bus_fire_intx_routing_notifier(sec);
1892     }
1893 }
1894 
1895 void pci_device_set_intx_routing_notifier(PCIDevice *dev,
1896                                           PCIINTxRoutingNotifier notifier)
1897 {
1898     dev->intx_routing_notifier = notifier;
1899 }
1900 
1901 /*
1902  * PCI-to-PCI bridge specification
1903  * 9.1: Interrupt routing. Table 9-1
1904  *
1905  * the PCI Express Base Specification, Revision 2.1
1906  * 2.2.8.1: INTx interrupt signaling - Rules
1907  *          the Implementation Note
1908  *          Table 2-20
1909  */
1910 /*
1911  * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
1912  * 0-origin unlike PCI interrupt pin register.
1913  */
1914 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
1915 {
1916     return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
1917 }
1918 
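/*
 * Illustrative sketch, not part of this file.  Assuming pci_swizzle() is the
 * conventional (slot + pin) % PCI_NUM_PINS rotation from the bridge
 * specification, the pin seen on the parent bus rotates with the slot:
 */
#if 0
    assert(pci_swizzle(0, 0) == 0);   /* slot 0: INTA stays INTA */
    assert(pci_swizzle(2, 0) == 2);   /* slot 2: INTA appears as INTC */
    assert(pci_swizzle(3, 3) == 2);   /* slot 3: INTD appears as INTC */
#endif
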
1919 /***********************************************************/
1920 /* monitor info on PCI */
1921 
1922 static const pci_class_desc pci_class_descriptions[] =
1923 {
1924     { 0x0001, "VGA controller", "display"},
1925     { 0x0100, "SCSI controller", "scsi"},
1926     { 0x0101, "IDE controller", "ide"},
1927     { 0x0102, "Floppy controller", "fdc"},
1928     { 0x0103, "IPI controller", "ipi"},
1929     { 0x0104, "RAID controller", "raid"},
1930     { 0x0106, "SATA controller"},
1931     { 0x0107, "SAS controller"},
1932     { 0x0180, "Storage controller"},
1933     { 0x0200, "Ethernet controller", "ethernet"},
1934     { 0x0201, "Token Ring controller", "token-ring"},
1935     { 0x0202, "FDDI controller", "fddi"},
1936     { 0x0203, "ATM controller", "atm"},
1937     { 0x0280, "Network controller"},
1938     { 0x0300, "VGA controller", "display", 0x00ff},
1939     { 0x0301, "XGA controller"},
1940     { 0x0302, "3D controller"},
1941     { 0x0380, "Display controller"},
1942     { 0x0400, "Video controller", "video"},
1943     { 0x0401, "Audio controller", "sound"},
1944     { 0x0402, "Phone"},
1945     { 0x0403, "Audio controller", "sound"},
1946     { 0x0480, "Multimedia controller"},
1947     { 0x0500, "RAM controller", "memory"},
1948     { 0x0501, "Flash controller", "flash"},
1949     { 0x0580, "Memory controller"},
1950     { 0x0600, "Host bridge", "host"},
1951     { 0x0601, "ISA bridge", "isa"},
1952     { 0x0602, "EISA bridge", "eisa"},
1953     { 0x0603, "MC bridge", "mca"},
1954     { 0x0604, "PCI bridge", "pci-bridge"},
1955     { 0x0605, "PCMCIA bridge", "pcmcia"},
1956     { 0x0606, "NUBUS bridge", "nubus"},
1957     { 0x0607, "CARDBUS bridge", "cardbus"},
1958     { 0x0608, "RACEWAY bridge"},
1959     { 0x0680, "Bridge"},
1960     { 0x0700, "Serial port", "serial"},
1961     { 0x0701, "Parallel port", "parallel"},
1962     { 0x0800, "Interrupt controller", "interrupt-controller"},
1963     { 0x0801, "DMA controller", "dma-controller"},
1964     { 0x0802, "Timer", "timer"},
1965     { 0x0803, "RTC", "rtc"},
1966     { 0x0900, "Keyboard", "keyboard"},
1967     { 0x0901, "Pen", "pen"},
1968     { 0x0902, "Mouse", "mouse"},
1969     { 0x0A00, "Dock station", "dock", 0x00ff},
1970     { 0x0B00, "i386 cpu", "cpu", 0x00ff},
1971     { 0x0c00, "Firewire controller", "firewire"},
1972     { 0x0c01, "Access bus controller", "access-bus"},
1973     { 0x0c02, "SSA controller", "ssa"},
1974     { 0x0c03, "USB controller", "usb"},
1975     { 0x0c04, "Fibre channel controller", "fibre-channel"},
1976     { 0x0c05, "SMBus"},
1977     { 0, NULL}
1978 };
1979 
1980 void pci_for_each_device_under_bus_reverse(PCIBus *bus,
1981                                            pci_bus_dev_fn fn,
1982                                            void *opaque)
1983 {
1984     PCIDevice *d;
1985     int devfn;
1986 
1987     for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
1988         d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
1989         if (d) {
1990             fn(bus, d, opaque);
1991         }
1992     }
1993 }
1994 
1995 void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
1996                                  pci_bus_dev_fn fn, void *opaque)
1997 {
1998     bus = pci_find_bus_nr(bus, bus_num);
1999 
2000     if (bus) {
2001         pci_for_each_device_under_bus_reverse(bus, fn, opaque);
2002     }
2003 }
2004 
2005 void pci_for_each_device_under_bus(PCIBus *bus,
2006                                    pci_bus_dev_fn fn, void *opaque)
2007 {
2008     PCIDevice *d;
2009     int devfn;
2010 
2011     for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
2012         d = bus->devices[devfn];
2013         if (d) {
2014             fn(bus, d, opaque);
2015         }
2016     }
2017 }
2018 
2019 void pci_for_each_device(PCIBus *bus, int bus_num,
2020                          pci_bus_dev_fn fn, void *opaque)
2021 {
2022     bus = pci_find_bus_nr(bus, bus_num);
2023 
2024     if (bus) {
2025         pci_for_each_device_under_bus(bus, fn, opaque);
2026     }
2027 }
2028 
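/*
 * Illustrative sketch, not part of this file: a pci_bus_dev_fn callback as
 * consumed by the pci_for_each_device*() helpers above.  The counter passed
 * through opaque is hypothetical.
 */
#if 0
static void count_device(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    unsigned *count = opaque;

    (*count)++;
}
/* ... later: pci_for_each_device_under_bus(bus, count_device, &count); */
#endif
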
2029 const pci_class_desc *get_class_desc(int class)
2030 {
2031     const pci_class_desc *desc;
2032 
2033     desc = pci_class_descriptions;
2034     while (desc->desc && class != desc->class) {
2035         desc++;
2036     }
2037 
2038     return desc;
2039 }
2040 
2041 void pci_init_nic_devices(PCIBus *bus, const char *default_model)
2042 {
2043     qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model,
2044                                 "virtio", "virtio-net-pci");
2045 }
2046 
2047 bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model,
2048                           const char *alias, const char *devaddr)
2049 {
2050     NICInfo *nd = qemu_find_nic_info(model, true, alias);
2051     int dom, busnr, devfn;
2052     PCIDevice *pci_dev;
2053     unsigned slot;
2054     PCIBus *bus;
2055 
2056     if (!nd) {
2057         return false;
2058     }
2059 
2060     if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) {
2061         error_report("Invalid PCI device address %s for device %s",
2062                      devaddr, model);
2063         exit(1);
2064     }
2065 
2066     if (dom != 0) {
2067         error_report("No support for non-zero PCI domains");
2068         exit(1);
2069     }
2070 
2071     devfn = PCI_DEVFN(slot, 0);
2072 
2073     bus = pci_find_bus_nr(rootbus, busnr);
2074     if (!bus) {
2075         error_report("Invalid PCI device address %s for device %s",
2076                      devaddr, model);
2077         exit(1);
2078     }
2079 
2080     pci_dev = pci_new(devfn, model);
2081     qdev_set_nic_properties(&pci_dev->qdev, nd);
2082     pci_realize_and_unref(pci_dev, bus, &error_fatal);
2083     return true;
2084 }
2085 
2086 PCIDevice *pci_vga_init(PCIBus *bus)
2087 {
2088     vga_interface_created = true;
2089     switch (vga_interface_type) {
2090     case VGA_CIRRUS:
2091         return pci_create_simple(bus, -1, "cirrus-vga");
2092     case VGA_QXL:
2093         return pci_create_simple(bus, -1, "qxl-vga");
2094     case VGA_STD:
2095         return pci_create_simple(bus, -1, "VGA");
2096     case VGA_VMWARE:
2097         return pci_create_simple(bus, -1, "vmware-svga");
2098     case VGA_VIRTIO:
2099         return pci_create_simple(bus, -1, "virtio-vga");
2100     case VGA_NONE:
2101     default: /* Other non-PCI types. Checking for unsupported types is already
2102                 done in vl.c. */
2103         return NULL;
2104     }
2105 }
2106 
2107 /* Whether a given bus number is in range of the secondary
2108  * bus of the given bridge device. */
2109 static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
2110 {
2111     return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
2112              PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
2113         dev->config[PCI_SECONDARY_BUS] <= bus_num &&
2114         bus_num <= dev->config[PCI_SUBORDINATE_BUS];
2115 }
2116 
2117 /* Whether a given bus number is in a range of a root bus */
2118 static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
2119 {
2120     int i;
2121 
2122     for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
2123         PCIDevice *dev = bus->devices[i];
2124 
2125         if (dev && IS_PCI_BRIDGE(dev)) {
2126             if (pci_secondary_bus_in_range(dev, bus_num)) {
2127                 return true;
2128             }
2129         }
2130     }
2131 
2132     return false;
2133 }
2134 
2135 PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
2136 {
2137     PCIBus *sec;
2138 
2139     if (!bus) {
2140         return NULL;
2141     }
2142 
2143     if (pci_bus_num(bus) == bus_num) {
2144         return bus;
2145     }
2146 
2147     /* Consider all bus numbers in range for the host pci bridge. */
2148     if (!pci_bus_is_root(bus) &&
2149         !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
2150         return NULL;
2151     }
2152 
2153     /* try child bus */
2154     for (; bus; bus = sec) {
2155         QLIST_FOREACH(sec, &bus->child, sibling) {
2156             if (pci_bus_num(sec) == bus_num) {
2157                 return sec;
2158             }
2159             /* PXB buses are assumed to be children of bus 0 */
2160             if (pci_bus_is_root(sec)) {
2161                 if (pci_root_bus_in_range(sec, bus_num)) {
2162                     break;
2163                 }
2164             } else {
2165                 if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
2166                     break;
2167                 }
2168             }
2169         }
2170     }
2171 
2172     return NULL;
2173 }
2174 
2175 void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
2176                                   pci_bus_fn end, void *parent_state)
2177 {
2178     PCIBus *sec;
2179     void *state;
2180 
2181     if (!bus) {
2182         return;
2183     }
2184 
2185     if (begin) {
2186         state = begin(bus, parent_state);
2187     } else {
2188         state = parent_state;
2189     }
2190 
2191     QLIST_FOREACH(sec, &bus->child, sibling) {
2192         pci_for_each_bus_depth_first(sec, begin, end, state);
2193     }
2194 
2195     if (end) {
2196         end(bus, state);
2197     }
2198 }
2199 
2200 
2201 PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
2202 {
2203     bus = pci_find_bus_nr(bus, bus_num);
2204 
2205     if (!bus)
2206         return NULL;
2207 
2208     return bus->devices[devfn];
2209 }
2210 
2211 #define ONBOARD_INDEX_MAX (16 * 1024 - 1)
2212 
2213 static void pci_qdev_realize(DeviceState *qdev, Error **errp)
2214 {
2215     PCIDevice *pci_dev = (PCIDevice *)qdev;
2216     PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
2217     ObjectClass *klass = OBJECT_CLASS(pc);
2218     Error *local_err = NULL;
2219     bool is_default_rom;
2220     uint16_t class_id;
2221 
2222     /*
2223      * capped by systemd (see: udev-builtin-net_id.c);
2224      * as it's the only known consumer, honor its limit so that users
2225      * don't misconfigure QEMU and then wonder why acpi-index doesn't work
2226      */
2227     if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
2228         error_setg(errp, "acpi-index should be less than or equal to %u",
2229                    ONBOARD_INDEX_MAX);
2230         return;
2231     }
2232 
2233     /*
2234      * make sure that acpi-index is unique across all present PCI devices
2235      */
2236     if (pci_dev->acpi_index) {
2237         GSequence *used_indexes = pci_acpi_index_list();
2238 
2239         if (g_sequence_lookup(used_indexes,
2240                               GINT_TO_POINTER(pci_dev->acpi_index),
2241                               g_cmp_uint32, NULL)) {
2242             error_setg(errp, "a PCI device with acpi-index = %" PRIu32
2243                        " already exists", pci_dev->acpi_index);
2244             return;
2245         }
2246         g_sequence_insert_sorted(used_indexes,
2247                                  GINT_TO_POINTER(pci_dev->acpi_index),
2248                                  g_cmp_uint32, NULL);
2249     }
2250 
2251     if (pci_dev->romsize != UINT32_MAX && !is_power_of_2(pci_dev->romsize)) {
2252         error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
2253         return;
2254     }
2255 
2256     /* initialize cap_present for pci_is_express() and pci_config_size().
2257      * Note that hybrid PCI devices are not flagged automatically and need
2258      * to manage QEMU_PCI_CAP_EXPRESS manually */
2259     if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
2260        !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
2261         pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
2262     }
2263 
2264     if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
2265         pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
2266     }
2267 
2268     pci_dev = do_pci_register_device(pci_dev,
2269                                      object_get_typename(OBJECT(qdev)),
2270                                      pci_dev->devfn, errp);
2271     if (pci_dev == NULL)
2272         return;
2273 
2274     if (pc->realize) {
2275         pc->realize(pci_dev, &local_err);
2276         if (local_err) {
2277             error_propagate(errp, local_err);
2278             do_pci_unregister_device(pci_dev);
2279             return;
2280         }
2281     }
2282 
2283     if (!pcie_sriov_register_device(pci_dev, errp)) {
2284         pci_qdev_unrealize(DEVICE(pci_dev));
2285         return;
2286     }
2287 
2288     /*
2289      * A PCIe Downstream Port that does not have ARI Forwarding enabled must
2290      * associate only Device 0 with the device attached to the bus
2291      * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
2292      * sec 7.3.1).
2293      * With ARI, PCI_SLOT() can return a non-zero value because the traditional
2294      * 5-bit Device Number and 3-bit Function Number fields in its associated
2295      * Routing IDs, Requester IDs and Completer IDs are interpreted as a
2296      * single 8-bit Function Number. Hence, ignore ARI capable devices.
2297      */
2298     if (pci_is_express(pci_dev) &&
2299         !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
2300         pcie_has_upstream_port(pci_dev) &&
2301         PCI_SLOT(pci_dev->devfn)) {
2302         warn_report("PCI: slot %d is not valid for %s,"
2303                     " parent device only allows plugging into slot 0.",
2304                     PCI_SLOT(pci_dev->devfn), pci_dev->name);
2305     }
2306 
2307     if (pci_dev->failover_pair_id) {
2308         if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
2309             error_setg(errp, "failover primary device must be on "
2310                              "PCIExpress bus");
2311             pci_qdev_unrealize(DEVICE(pci_dev));
2312             return;
2313         }
2314         class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
2315         if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
2316             error_setg(errp, "failover primary device is not an "
2317                              "Ethernet device");
2318             pci_qdev_unrealize(DEVICE(pci_dev));
2319             return;
2320         }
2321         if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
2322             || (PCI_FUNC(pci_dev->devfn) != 0)) {
2323             error_setg(errp, "failover: primary device must be in its own "
2324                               "PCI slot");
2325             pci_qdev_unrealize(DEVICE(pci_dev));
2326             return;
2327         }
2328         qdev->allow_unplug_during_migration = true;
2329     }
2330 
2331     /* rom loading */
2332     is_default_rom = false;
2333     if (pci_dev->romfile == NULL && pc->romfile != NULL) {
2334         pci_dev->romfile = g_strdup(pc->romfile);
2335         is_default_rom = true;
2336     }
2337 
2338     pci_add_option_rom(pci_dev, is_default_rom, &local_err);
2339     if (local_err) {
2340         error_propagate(errp, local_err);
2341         pci_qdev_unrealize(DEVICE(pci_dev));
2342         return;
2343     }
2344 
2345     pci_set_power(pci_dev, true);
2346 
2347     pci_dev->msi_trigger = pci_msi_trigger;
2348 }
2349 
2350 static PCIDevice *pci_new_internal(int devfn, bool multifunction,
2351                                    const char *name)
2352 {
2353     DeviceState *dev;
2354 
2355     dev = qdev_new(name);
2356     qdev_prop_set_int32(dev, "addr", devfn);
2357     qdev_prop_set_bit(dev, "multifunction", multifunction);
2358     return PCI_DEVICE(dev);
2359 }
2360 
2361 PCIDevice *pci_new_multifunction(int devfn, const char *name)
2362 {
2363     return pci_new_internal(devfn, true, name);
2364 }
2365 
2366 PCIDevice *pci_new(int devfn, const char *name)
2367 {
2368     return pci_new_internal(devfn, false, name);
2369 }
2370 
2371 bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
2372 {
2373     return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
2374 }
2375 
2376 PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
2377                                            const char *name)
2378 {
2379     PCIDevice *dev = pci_new_multifunction(devfn, name);
2380     pci_realize_and_unref(dev, bus, &error_fatal);
2381     return dev;
2382 }
2383 
2384 PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
2385 {
2386     PCIDevice *dev = pci_new(devfn, name);
2387     pci_realize_and_unref(dev, bus, &error_fatal);
2388     return dev;
2389 }
2390 
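/*
 * Illustrative sketch, not part of this file: board code that needs to set
 * properties before realizing uses the two-step pci_new() +
 * pci_realize_and_unref() path instead of pci_create_simple().  The "e1000"
 * model, devfn and "net0" netdev id below are example values.
 */
#if 0
    PCIDevice *dev = pci_new(PCI_DEVFN(3, 0), "e1000");

    qdev_prop_set_string(&dev->qdev, "netdev", "net0");
    pci_realize_and_unref(dev, bus, &error_fatal);
#endif
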
2391 static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
2392 {
2393     int offset = PCI_CONFIG_HEADER_SIZE;
2394     int i;
2395     for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
2396         if (pdev->used[i])
2397             offset = i + 1;
2398         else if (i - offset + 1 == size)
2399             return offset;
2400     }
2401     return 0;
2402 }
2403 
2404 static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
2405                                         uint8_t *prev_p)
2406 {
2407     uint8_t next, prev;
2408 
2409     if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
2410         return 0;
2411 
2412     for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
2413          prev = next + PCI_CAP_LIST_NEXT)
2414         if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
2415             break;
2416 
2417     if (prev_p)
2418         *prev_p = prev;
2419     return next;
2420 }
2421 
2422 static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
2423 {
2424     uint8_t next, prev, found = 0;
2425 
2426     if (!(pdev->used[offset])) {
2427         return 0;
2428     }
2429 
2430     assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);
2431 
2432     for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
2433          prev = next + PCI_CAP_LIST_NEXT) {
2434         if (next <= offset && next > found) {
2435             found = next;
2436         }
2437     }
2438     return found;
2439 }
2440 
2441 /* Patch the PCI vendor and device ids in a PCI rom image if necessary.
2442    This is needed for an option rom which is used for more than one device. */
2443 static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
2444 {
2445     uint16_t vendor_id;
2446     uint16_t device_id;
2447     uint16_t rom_vendor_id;
2448     uint16_t rom_device_id;
2449     uint16_t rom_magic;
2450     uint16_t pcir_offset;
2451     uint8_t checksum;
2452 
2453     /* Words in rom data are little endian (like in PCI configuration),
2454        so they can be read / written with pci_get_word / pci_set_word. */
2455 
2456     /* Only a valid rom will be patched. */
2457     rom_magic = pci_get_word(ptr);
2458     if (rom_magic != 0xaa55) {
2459         trace_pci_bad_rom_magic(rom_magic, 0xaa55);
2460         return;
2461     }
2462     pcir_offset = pci_get_word(ptr + 0x18);
2463     if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
2464         trace_pci_bad_pcir_offset(pcir_offset);
2465         return;
2466     }
2467 
2468     vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2469     device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2470     rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
2471     rom_device_id = pci_get_word(ptr + pcir_offset + 6);
2472 
2473     trace_pci_rom_and_pci_ids(pdev->romfile, vendor_id, device_id,
2474                               rom_vendor_id, rom_device_id);
2475 
2476     checksum = ptr[6];
2477 
2478     if (vendor_id != rom_vendor_id) {
2479         /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
2480         checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
2481         checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
2482         trace_pci_rom_checksum_change(ptr[6], checksum);
2483         ptr[6] = checksum;
2484         pci_set_word(ptr + pcir_offset + 4, vendor_id);
2485     }
2486 
2487     if (device_id != rom_device_id) {
2488         /* Patch device id and checksum (at offset 6 for etherboot roms). */
2489         checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
2490         checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
2491         trace_pci_rom_checksum_change(ptr[6], checksum);
2492         ptr[6] = checksum;
2493         pci_set_word(ptr + pcir_offset + 6, device_id);
2494     }
2495 }
2496 
2497 /* Add an option rom for the device */
2498 static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
2499                                Error **errp)
2500 {
2501     int64_t size = 0;
2502     g_autofree char *path = NULL;
2503     char name[32];
2504     const VMStateDescription *vmsd;
2505 
2506     /*
2507      * In case of incoming migration the ROM will come with the migration
2508      * stream, so there is no reason to load the file.  Nor do we want to
2509      * fail if the local ROM file mismatches the specified romsize.
2510      */
2511     bool load_file = !runstate_check(RUN_STATE_INMIGRATE);
2512 
2513     if (!pdev->romfile || !strlen(pdev->romfile)) {
2514         return;
2515     }
2516 
2517     if (!pdev->rom_bar) {
2518         /*
2519          * Load rom via fw_cfg instead of creating a rom bar,
2520          * for 0.11 compatibility.
2521          */
2522         int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);
2523 
2524         /*
2525          * Hot-plugged devices can't use the option ROM
2526          * if the rom bar is disabled.
2527          */
2528         if (DEVICE(pdev)->hotplugged) {
2529             error_setg(errp, "Hot-plugged device without ROM bar"
2530                        " can't have an option ROM");
2531             return;
2532         }
2533 
2534         if (class == 0x0300) {
2535             rom_add_vga(pdev->romfile);
2536         } else {
2537             rom_add_option(pdev->romfile, -1);
2538         }
2539         return;
2540     }
2541 
2542     if (pci_is_vf(pdev)) {
2543         if (pdev->rom_bar > 0) {
2544             error_setg(errp, "ROM BAR cannot be enabled for SR-IOV VF");
2545         }
2546 
2547         return;
2548     }
2549 
2550     if (load_file || pdev->romsize == UINT32_MAX) {
2551         path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
2552         if (path == NULL) {
2553             path = g_strdup(pdev->romfile);
2554         }
2555 
2556         size = get_image_size(path);
2557         if (size < 0) {
2558             error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
2559             return;
2560         } else if (size == 0) {
2561             error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
2562             return;
2563         } else if (size > 2 * GiB) {
2564             error_setg(errp,
2565                        "romfile \"%s\" too large (size cannot exceed 2 GiB)",
2566                        pdev->romfile);
2567             return;
2568         }
2569         if (pdev->romsize != UINT32_MAX) {
2570             if (size > pdev->romsize) {
2571                 error_setg(errp, "romfile \"%s\" (%u bytes) "
2572                            "is too large for ROM size %u",
2573                            pdev->romfile, (uint32_t)size, pdev->romsize);
2574                 return;
2575             }
2576         } else {
2577             pdev->romsize = pow2ceil(size);
2578         }
2579     }
2580 
2581     vmsd = qdev_get_vmsd(DEVICE(pdev));
2582     snprintf(name, sizeof(name), "%s.rom",
2583              vmsd ? vmsd->name : object_get_typename(OBJECT(pdev)));
2584 
2585     pdev->has_rom = true;
2586     memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
2587                            &error_fatal);
2588 
2589     if (load_file) {
2590         void *ptr = memory_region_get_ram_ptr(&pdev->rom);
2591 
2592         if (load_image_size(path, ptr, size) < 0) {
2593             error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
2594             return;
2595         }
2596 
2597         if (is_default_rom) {
2598             /* Only the default rom images will be patched (if needed). */
2599             pci_patch_ids(pdev, ptr, size);
2600         }
2601     }
2602 
2603     pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
2604 }
2605 
2606 static void pci_del_option_rom(PCIDevice *pdev)
2607 {
2608     if (!pdev->has_rom)
2609         return;
2610 
2611     vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
2612     pdev->has_rom = false;
2613 }
2614 
2615 /*
2616  * On success, pci_add_capability() returns a positive value
2617  * that is the offset of the PCI capability.
2618  * On failure, it sets an error and returns a negative error
2619  * code.
2620  */
2621 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
2622                        uint8_t offset, uint8_t size,
2623                        Error **errp)
2624 {
2625     uint8_t *config;
2626     int i, overlapping_cap;
2627 
2628     if (!offset) {
2629         offset = pci_find_space(pdev, size);
2630         /* running out of PCI config space is a programming error */
2631         assert(offset);
2632     } else {
2633         /* Verify that capabilities don't overlap.  Note: device assignment
2634          * depends on this check to verify that the device is not broken.
2635          * Should never trigger for emulated devices, but it's helpful
2636          * for debugging these. */
2637         for (i = offset; i < offset + size; i++) {
2638             overlapping_cap = pci_find_capability_at_offset(pdev, i);
2639             if (overlapping_cap) {
2640                 error_setg(errp, "%s:%02x:%02x.%x "
2641                            "Attempt to add PCI capability %x at offset "
2642                            "%x overlaps existing capability %x at offset %x",
2643                            pci_root_bus_path(pdev), pci_dev_bus_num(pdev),
2644                            PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2645                            cap_id, offset, overlapping_cap, i);
2646                 return -EINVAL;
2647             }
2648         }
2649     }
2650 
2651     config = pdev->config + offset;
2652     config[PCI_CAP_LIST_ID] = cap_id;
2653     config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
2654     pdev->config[PCI_CAPABILITY_LIST] = offset;
2655     pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
2656     memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
2657     /* Make capability read-only by default */
2658     memset(pdev->wmask + offset, 0, size);
2659     /* Check capability by default */
2660     memset(pdev->cmask + offset, 0xFF, size);
2661     return offset;
2662 }
2663 
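/*
 * Illustrative sketch, not part of this file: adding a capability from a
 * device realize function and checking the returned offset.  The
 * vendor-specific capability ID and 0x10-byte size are example values.
 */
#if 0
    int cap = pci_add_capability(pci_dev, PCI_CAP_ID_VNDR, 0, 0x10, errp);

    if (cap < 0) {
        return;
    }
    /* let the guest write one byte of the capability body */
    pci_dev->wmask[cap + 3] = 0xff;
#endif
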
2664 /* Unlink capability from the pci config space. */
2665 void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
2666 {
2667     uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);
2668     if (!offset)
2669         return;
2670     pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
2671     /* Make capability writable again */
2672     memset(pdev->wmask + offset, 0xff, size);
2673     memset(pdev->w1cmask + offset, 0, size);
2674     /* Clear cmask as device-specific registers can't be checked */
2675     memset(pdev->cmask + offset, 0, size);
2676     memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));
2677 
2678     if (!pdev->config[PCI_CAPABILITY_LIST])
2679         pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
2680 }
2681 
2682 uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
2683 {
2684     return pci_find_capability_list(pdev, cap_id, NULL);
2685 }
2686 
2687 static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
2688 {
2689     PCIDevice *d = (PCIDevice *)dev;
2690     const char *name = NULL;
2691     const pci_class_desc *desc =  pci_class_descriptions;
2692     int class = pci_get_word(d->config + PCI_CLASS_DEVICE);
2693 
2694     while (desc->desc &&
2695           (class & ~desc->fw_ign_bits) !=
2696           (desc->class & ~desc->fw_ign_bits)) {
2697         desc++;
2698     }
2699 
2700     if (desc->desc) {
2701         name = desc->fw_name;
2702     }
2703 
2704     if (name) {
2705         pstrcpy(buf, len, name);
2706     } else {
2707         snprintf(buf, len, "pci%04x,%04x",
2708                  pci_get_word(d->config + PCI_VENDOR_ID),
2709                  pci_get_word(d->config + PCI_DEVICE_ID));
2710     }
2711 
2712     return buf;
2713 }
2714 
2715 static char *pcibus_get_fw_dev_path(DeviceState *dev)
2716 {
2717     PCIDevice *d = (PCIDevice *)dev;
2718     char name[33];
2719     int has_func = !!PCI_FUNC(d->devfn);
2720 
2721     return g_strdup_printf("%s@%x%s%.*x",
2722                            pci_dev_fw_name(dev, name, sizeof(name)),
2723                            PCI_SLOT(d->devfn),
2724                            has_func ? "," : "",
2725                            has_func,
2726                            PCI_FUNC(d->devfn));
2727 }
2728 
2729 static char *pcibus_get_dev_path(DeviceState *dev)
2730 {
2731     PCIDevice *d = container_of(dev, PCIDevice, qdev);
2732     PCIDevice *t;
2733     int slot_depth;
2734     /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
2735      * 00 is added here to make this format compatible with
2736      * domain:Bus:Slot.Func for systems without nested PCI bridges.
2737      * Slot.Function list specifies the slot and function numbers for all
2738      * devices on the path from root to the specific device. */
2739     const char *root_bus_path;
2740     int root_bus_len;
2741     char slot[] = ":SS.F";
2742     int slot_len = sizeof slot - 1 /* For '\0' */;
2743     int path_len;
2744     char *path, *p;
2745     int s;
2746 
2747     root_bus_path = pci_root_bus_path(d);
2748     root_bus_len = strlen(root_bus_path);
2749 
2750     /* Calculate # of slots on path between device and root. */
2751     slot_depth = 0;
2752     for (t = d; t; t = pci_get_bus(t)->parent_dev) {
2753         ++slot_depth;
2754     }
2755 
2756     path_len = root_bus_len + slot_len * slot_depth;
2757 
2758     /* Allocate memory, fill in the terminating null byte. */
2759     path = g_malloc(path_len + 1 /* For '\0' */);
2760     path[path_len] = '\0';
2761 
2762     memcpy(path, root_bus_path, root_bus_len);
2763 
2764     /* Fill in slot numbers. We walk up from device to root, so need to print
2765      * them in the reverse order, last to first. */
2766     p = path + path_len;
2767     for (t = d; t; t = pci_get_bus(t)->parent_dev) {
2768         p -= slot_len;
2769         s = snprintf(slot, sizeof slot, ":%02x.%x",
2770                      PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
2771         assert(s == slot_len);
2772         memcpy(p, slot, slot_len);
2773     }
2774 
2775     return path;
2776 }
2777 
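/*
 * Illustrative sketch, not part of this file: for a hypothetical device at
 * slot 05, function 2 behind a PCI bridge at 00:01.0 on a root bus whose
 * path is "0000:00", pcibus_get_dev_path() above yields:
 *
 *     "0000:00:01.0:05.2"
 */
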
2778 static int pci_qdev_find_recursive(PCIBus *bus,
2779                                    const char *id, PCIDevice **pdev)
2780 {
2781     DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
2782     if (!qdev) {
2783         return -ENODEV;
2784     }
2785 
2786     /* roughly check if given qdev is pci device */
2787     if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
2788         *pdev = PCI_DEVICE(qdev);
2789         return 0;
2790     }
2791     return -EINVAL;
2792 }
2793 
2794 int pci_qdev_find_device(const char *id, PCIDevice **pdev)
2795 {
2796     PCIHostState *host_bridge;
2797     int rc = -ENODEV;
2798 
2799     QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
2800         int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
2801         if (!tmp) {
2802             rc = 0;
2803             break;
2804         }
2805         if (tmp != -ENODEV) {
2806             rc = tmp;
2807         }
2808     }
2809 
2810     return rc;
2811 }
2812 
2813 MemoryRegion *pci_address_space(PCIDevice *dev)
2814 {
2815     return pci_get_bus(dev)->address_space_mem;
2816 }
2817 
2818 MemoryRegion *pci_address_space_io(PCIDevice *dev)
2819 {
2820     return pci_get_bus(dev)->address_space_io;
2821 }
2822 
2823 static void pci_device_class_init(ObjectClass *klass, const void *data)
2824 {
2825     DeviceClass *k = DEVICE_CLASS(klass);
2826 
2827     k->realize = pci_qdev_realize;
2828     k->unrealize = pci_qdev_unrealize;
2829     k->bus_type = TYPE_PCI_BUS;
2830     device_class_set_props(k, pci_props);
2831     object_class_property_set_description(
2832         klass, "x-max-bounce-buffer-size",
2833         "Maximum buffer size allocated for bounce buffers used for mapped "
2834         "access to indirect DMA memory");
2835 }
2836 
2837 static void pci_device_class_base_init(ObjectClass *klass, const void *data)
2838 {
2839     if (!object_class_is_abstract(klass)) {
2840         ObjectClass *conventional =
2841             object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
2842         ObjectClass *pcie =
2843             object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
2844         ObjectClass *cxl =
2845             object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);
2846         assert(conventional || pcie || cxl);
2847     }
2848 }
2849 
2850 /*
2851  * Get IOMMU root bus, aliased bus and devfn of a PCI device
2852  *
2853  * IOMMU root bus is needed by all call sites to call into iommu_ops.
2854  * For call sites which don't need aliased BDF, passing NULL to
2855  * aliased_[bus|devfn] is allowed.
2856  *
2857  * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device.
2858  *
2859  * @aliased_bus: return aliased #PCIBus of the PCI device, optional.
2860  *
2861  * @aliased_devfn: return aliased devfn of the PCI device, optional.
2862  */
2863 static void pci_device_get_iommu_bus_devfn(PCIDevice *dev,
2864                                            PCIBus **piommu_bus,
2865                                            PCIBus **aliased_bus,
2866                                            int *aliased_devfn)
2867 {
2868     PCIBus *bus = pci_get_bus(dev);
2869     PCIBus *iommu_bus = bus;
2870     int devfn = dev->devfn;
2871 
2872     while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
2873         PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);
2874 
2875         /*
2876          * The requester ID of the provided device may be aliased, as seen from
2877          * the IOMMU, due to topology limitations.  The IOMMU relies on a
2878          * requester ID to provide a unique AddressSpace for devices, but
2879          * conventional PCI buses pre-date such concepts.  Instead, the PCIe-
2880          * to-PCI bridge creates and accepts transactions on behalf of down-
2881          * stream devices.  When doing so, all downstream devices are masked
2882          * (aliased) behind a single requester ID.  The requester ID used
2883          * depends on the format of the bridge devices.  Proper PCIe-to-PCI
2884          * bridges, with a PCIe capability indicating such, follow the
2885          * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
2886          * where the bridge uses the secondary bus as the bridge portion of the
2887          * requester ID and devfn of 00.0.  For other bridges, typically those
2888          * found on the root complex such as the dmi-to-pci-bridge, we follow
2889          * the convention of typical bare-metal hardware, which uses the
2890          * requester ID of the bridge itself.  There are device specific
2891          * exceptions to these rules, but these are the defaults that the
2892          * Linux kernel uses when determining DMA aliases itself, and are believed
2893          * to be true for the bare metal equivalents of the devices emulated
2894          * in QEMU.
2895          */
2896         if (!pci_bus_is_express(iommu_bus)) {
2897             PCIDevice *parent = iommu_bus->parent_dev;
2898 
2899             if (pci_is_express(parent) &&
2900                 pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
2901                 devfn = PCI_DEVFN(0, 0);
2902                 bus = iommu_bus;
2903             } else {
2904                 devfn = parent->devfn;
2905                 bus = parent_bus;
2906             }
2907         }
2908 
2909         iommu_bus = parent_bus;
2910     }
2911 
2912     assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
2913     assert(iommu_bus);
2914 
2915     if (pci_bus_bypass_iommu(bus) || !iommu_bus->iommu_ops) {
2916         iommu_bus = NULL;
2917     }
2918 
2919     *piommu_bus = iommu_bus;
2920 
2921     if (aliased_bus) {
2922         *aliased_bus = bus;
2923     }
2924 
2925     if (aliased_devfn) {
2926         *aliased_devfn = devfn;
2927     }
2928 }
2929 
2930 AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
2931 {
2932     PCIBus *bus;
2933     PCIBus *iommu_bus;
2934     int devfn;
2935 
2936     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
2937     if (iommu_bus) {
2938         return iommu_bus->iommu_ops->get_address_space(bus,
2939                                  iommu_bus->iommu_opaque, devfn);
2940     }
2941     return &address_space_memory;
2942 }
2943 
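/*
 * Illustrative sketch, not part of this file: a device model that wants the
 * address space its DMA actually goes through (e.g. behind a vIOMMU) can ask
 * for it explicitly.  Assumes the dma_memory_read() helper from
 * "system/dma.h"; dma_addr, buf and len are hypothetical.
 */
#if 0
    AddressSpace *as = pci_device_iommu_address_space(pci_dev);

    dma_memory_read(as, dma_addr, buf, len, MEMTXATTRS_UNSPECIFIED);
#endif
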
2944 int pci_iommu_init_iotlb_notifier(PCIDevice *dev, IOMMUNotifier *n,
2945                                   IOMMUNotify fn, void *opaque)
2946 {
2947     PCIBus *bus;
2948     PCIBus *iommu_bus;
2949     int devfn;
2950 
2951     pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn);
2952     if (iommu_bus && iommu_bus->iommu_ops->init_iotlb_notifier) {
2953         iommu_bus->iommu_ops->init_iotlb_notifier(bus, iommu_bus->iommu_opaque,
2954                                                   devfn, n, fn, opaque);
2955         return 0;
2956     }
2957 
2958     return -ENODEV;
2959 }
2960 
2961 bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod,
2962                                  Error **errp)
2963 {
2964     PCIBus *iommu_bus, *aliased_bus;
2965     int aliased_devfn;
2966 
2967     /* set_iommu_device requires device's direct BDF instead of aliased BDF */
2968     pci_device_get_iommu_bus_devfn(dev, &iommu_bus,
2969                                    &aliased_bus, &aliased_devfn);
2970     if (iommu_bus && iommu_bus->iommu_ops->set_iommu_device) {
2971         hiod->aliased_bus = aliased_bus;
2972         hiod->aliased_devfn = aliased_devfn;
2973         return iommu_bus->iommu_ops->set_iommu_device(pci_get_bus(dev),
2974                                                       iommu_bus->iommu_opaque,
2975                                                       dev->devfn, hiod, errp);
2976     }
2977     return true;
2978 }
2979 
2980 void pci_device_unset_iommu_device(PCIDevice *dev)
2981 {
2982     PCIBus *iommu_bus;
2983 
2984     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL);
2985     if (iommu_bus && iommu_bus->iommu_ops->unset_iommu_device) {
2986         return iommu_bus->iommu_ops->unset_iommu_device(pci_get_bus(dev),
2987                                                         iommu_bus->iommu_opaque,
2988                                                         dev->devfn);
2989     }
2990 }
2991 
2992 int pci_pri_request_page(PCIDevice *dev, uint32_t pasid, bool priv_req,
2993                          bool exec_req, hwaddr addr, bool lpig,
2994                          uint16_t prgi, bool is_read, bool is_write)
2995 {
2996     PCIBus *bus;
2997     PCIBus *iommu_bus;
2998     int devfn;
2999 
3000     if (!dev->is_master ||
3001             ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) {
3002         return -EPERM;
3003     }
3004 
3005     if (!pcie_pri_enabled(dev)) {
3006         return -EPERM;
3007     }
3008 
3009     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3010     if (iommu_bus && iommu_bus->iommu_ops->pri_request_page) {
3011         return iommu_bus->iommu_ops->pri_request_page(bus,
3012                                                      iommu_bus->iommu_opaque,
3013                                                      devfn, pasid, priv_req,
3014                                                      exec_req, addr, lpig, prgi,
3015                                                      is_read, is_write);
3016     }
3017 
3018     return -ENODEV;
3019 }
3020 
3021 int pci_pri_register_notifier(PCIDevice *dev, uint32_t pasid,
3022                               IOMMUPRINotifier *notifier)
3023 {
3024     PCIBus *bus;
3025     PCIBus *iommu_bus;
3026     int devfn;
3027 
3028     if (!dev->is_master ||
3029             ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) {
3030         return -EPERM;
3031     }
3032 
3033     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3034     if (iommu_bus && iommu_bus->iommu_ops->pri_register_notifier) {
3035         iommu_bus->iommu_ops->pri_register_notifier(bus,
3036                                                     iommu_bus->iommu_opaque,
3037                                                     devfn, pasid, notifier);
3038         return 0;
3039     }
3040 
3041     return -ENODEV;
3042 }
3043 
3044 void pci_pri_unregister_notifier(PCIDevice *dev, uint32_t pasid)
3045 {
3046     PCIBus *bus;
3047     PCIBus *iommu_bus;
3048     int devfn;
3049 
3050     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3051     if (iommu_bus && iommu_bus->iommu_ops->pri_unregister_notifier) {
3052         iommu_bus->iommu_ops->pri_unregister_notifier(bus,
3053                                                       iommu_bus->iommu_opaque,
3054                                                       devfn, pasid);
3055     }
3056 }
3057 
3058 ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid,
3059                                     bool priv_req, bool exec_req,
3060                                     hwaddr addr, size_t length,
3061                                     bool no_write, IOMMUTLBEntry *result,
3062                                     size_t result_length,
3063                                     uint32_t *err_count)
3064 {
3065     PCIBus *bus;
3066     PCIBus *iommu_bus;
3067     int devfn;
3068 
3069     if (!dev->is_master ||
3070             ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) {
3071         return -EPERM;
3072     }
3073 
3074     if (result_length == 0) {
3075         return -ENOSPC;
3076     }
3077 
3078     if (!pcie_ats_enabled(dev)) {
3079         return -EPERM;
3080     }
3081 
3082     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3083     if (iommu_bus && iommu_bus->iommu_ops->ats_request_translation) {
3084         return iommu_bus->iommu_ops->ats_request_translation(bus,
3085                                                      iommu_bus->iommu_opaque,
3086                                                      devfn, pasid, priv_req,
3087                                                      exec_req, addr, length,
3088                                                      no_write, result,
3089                                                      result_length, err_count);
3090     }
3091 
3092     return -ENODEV;
3093 }
3094 
3095 int pci_iommu_register_iotlb_notifier(PCIDevice *dev, uint32_t pasid,
3096                                       IOMMUNotifier *n)
3097 {
3098     PCIBus *bus;
3099     PCIBus *iommu_bus;
3100     int devfn;
3101 
3102     if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) {
3103         return -EPERM;
3104     }
3105 
3106     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3107     if (iommu_bus && iommu_bus->iommu_ops->register_iotlb_notifier) {
3108         iommu_bus->iommu_ops->register_iotlb_notifier(bus,
3109                                            iommu_bus->iommu_opaque, devfn,
3110                                            pasid, n);
3111         return 0;
3112     }
3113 
3114     return -ENODEV;
3115 }
3116 
3117 int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid,
3118                                         IOMMUNotifier *n)
3119 {
3120     PCIBus *bus;
3121     PCIBus *iommu_bus;
3122     int devfn;
3123 
3124     if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) {
3125         return -EPERM;
3126     }
3127 
3128     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3129     if (iommu_bus && iommu_bus->iommu_ops->unregister_iotlb_notifier) {
3130         iommu_bus->iommu_ops->unregister_iotlb_notifier(bus,
3131                                                         iommu_bus->iommu_opaque,
3132                                                         devfn, pasid, n);
3133         return 0;
3134     }
3135 
3136     return -ENODEV;
3137 }
3138 
3139 int pci_iommu_get_iotlb_info(PCIDevice *dev, uint8_t *addr_width,
3140                              uint32_t *min_page_size)
3141 {
3142     PCIBus *bus;
3143     PCIBus *iommu_bus;
3144     int devfn;
3145 
3146     pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
3147     if (iommu_bus && iommu_bus->iommu_ops->get_iotlb_info) {
3148         iommu_bus->iommu_ops->get_iotlb_info(iommu_bus->iommu_opaque,
3149                                              addr_width, min_page_size);
3150         return 0;
3151     }
3152 
3153     return -ENODEV;
3154 }
3155 
3156 void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque)
3157 {
3158     /*
3159      * If called, pci_setup_iommu() should provide a minimum set of
3160      * useful callbacks for the bus.
3161      */
3162     assert(ops);
3163     assert(ops->get_address_space);
3164 
3165     bus->iommu_ops = ops;
3166     bus->iommu_opaque = opaque;
3167 }
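
/*
 * Minimal sketch of how a vIOMMU model might hook a bus (all names below are
 * hypothetical; only .get_address_space is mandatory per the asserts above):
 *
 *     static AddressSpace *my_iommu_get_address_space(PCIBus *bus,
 *                                                     void *opaque, int devfn)
 *     {
 *         MyIOMMUState *s = opaque;
 *         return &s->dev_as[devfn];
 *     }
 *
 *     static const PCIIOMMUOps my_iommu_ops = {
 *         .get_address_space = my_iommu_get_address_space,
 *     };
 *
 *     pci_setup_iommu(root_bus, &my_iommu_ops, s);
 *
 * Real implementations typically hand back a per-BDF AddressSpace backed by
 * an IOMMUMemoryRegion so that DMA is translated by the vIOMMU.
 */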
3168 
3169 static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
3170 {
3171     Range *range = opaque;
3172     uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
3173     int i;
3174 
3175     if (!(cmd & PCI_COMMAND_MEMORY)) {
3176         return;
3177     }
3178 
3179     if (IS_PCI_BRIDGE(dev)) {
3180         pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
3181         pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
3182 
3183         base = MAX(base, 0x1ULL << 32);
3184 
3185         if (limit >= base) {
3186             Range pref_range;
3187             range_set_bounds(&pref_range, base, limit);
3188             range_extend(range, &pref_range);
3189         }
3190     }
3191     for (i = 0; i < PCI_NUM_REGIONS; ++i) {
3192         PCIIORegion *r = &dev->io_regions[i];
3193         pcibus_t lob, upb;
3194         Range region_range;
3195 
3196         if (!r->size ||
3197             (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
3198             !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
3199             continue;
3200         }
3201 
3202         lob = pci_bar_address(dev, i, r->type, r->size);
3203         upb = lob + r->size - 1;
3204         if (lob == PCI_BAR_UNMAPPED) {
3205             continue;
3206         }
3207 
3208         lob = MAX(lob, 0x1ULL << 32);
3209 
3210         if (upb >= lob) {
3211             range_set_bounds(&region_range, lob, upb);
3212             range_extend(range, &region_range);
3213         }
3214     }
3215 }
3216 
3217 void pci_bus_get_w64_range(PCIBus *bus, Range *range)
3218 {
3219     range_make_empty(range);
3220     pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
3221 }
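
/*
 * Usage sketch (hypothetical board code): the collected range can be used to
 * size a 64-bit MMIO window for the bus, e.g.:
 *
 *     Range w64;
 *     pci_bus_get_w64_range(root_bus, &w64);
 *     if (!range_is_empty(&w64)) {
 *         uint64_t base = range_lob(&w64);
 *         uint64_t limit = range_upb(&w64);
 *         ...
 *     }
 */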
3222 
3223 static bool pcie_has_upstream_port(PCIDevice *dev)
3224 {
3225     PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));
3226 
3227     /* Device associated with an upstream port.
3228      * As there are several types of these, it's easier to check the
3229      * parent device: upstream ports are always connected to
3230      * root or downstream ports.
3231      */
3232     return parent_dev &&
3233         pci_is_express(parent_dev) &&
3234         parent_dev->exp.exp_cap &&
3235         (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
3236          pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
3237 }
3238 
3239 PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
3240 {
3241     PCIBus *bus = pci_get_bus(pci_dev);
3242 
3243     if (pcie_has_upstream_port(pci_dev)) {
3244         /* With an upstream PCIe port, we only support 1 device at slot 0 */
3245         return bus->devices[0];
3246     } else {
3247         /* Other bus types might support multiple devices at slots 0-31 */
3248         return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
3249     }
3250 }
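
/*
 * Example: on a conventional bus, a function at devfn 0x1a (slot 3,
 * function 2) resolves to bus->devices[PCI_DEVFN(3, 0)], i.e. devfn 0x18;
 * behind a PCIe upstream port only slot 0 is valid, so function 0 is always
 * bus->devices[0].
 */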
3251 
3252 MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
3253 {
3254     MSIMessage msg;
3255     if (msix_enabled(dev)) {
3256         msg = msix_get_message(dev, vector);
3257     } else if (msi_enabled(dev)) {
3258         msg = msi_get_message(dev, vector);
3259     } else {
3260         /* Should never happen */
3261         error_report("%s: unknown interrupt type", __func__);
3262         abort();
3263     }
3264     return msg;
3265 }
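
/*
 * Usage sketch (hypothetical caller): code that needs the raw MSI
 * address/data pair for a vector, for example to program an interrupt
 * route, could do:
 *
 *     MSIMessage msg = pci_get_msi_message(dev, vector);
 *     ... use msg.address and msg.data ...
 *
 * This is only valid while MSI or MSI-X is enabled; otherwise the function
 * aborts.
 */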
3266 
3267 void pci_set_power(PCIDevice *d, bool state)
3268 {
3269     /*
3270      * Don't change the enabled state of VFs when powering on/off the device.
3271      *
3272      * When powering on, VFs must not be enabled immediately; they must
3273      * remain disabled until the guest configures SR-IOV.
3274      * When powering off, the corresponding PF is reset, which in turn
3275      * disables its VFs.
3276      */
3277     if (!pci_is_vf(d)) {
3278         pci_set_enabled(d, state);
3279     }
3280 }
3281 
3282 void pci_set_enabled(PCIDevice *d, bool state)
3283 {
3284     if (d->enabled == state) {
3285         return;
3286     }
3287 
3288     d->enabled = state;
3289     pci_update_mappings(d);
3290     pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND)
3291                       & PCI_COMMAND_MASTER) && d->enabled);
3292     if (qdev_is_realized(&d->qdev)) {
3293         pci_device_reset(d);
3294     }
3295 }
3296 
3297 static const TypeInfo pci_device_type_info = {
3298     .name = TYPE_PCI_DEVICE,
3299     .parent = TYPE_DEVICE,
3300     .instance_size = sizeof(PCIDevice),
3301     .abstract = true,
3302     .class_size = sizeof(PCIDeviceClass),
3303     .class_init = pci_device_class_init,
3304     .class_base_init = pci_device_class_base_init,
3305 };
3306 
3307 static void pci_register_types(void)
3308 {
3309     type_register_static(&pci_bus_info);
3310     type_register_static(&pcie_bus_info);
3311     type_register_static(&cxl_bus_info);
3312     type_register_static(&conventional_pci_interface_info);
3313     type_register_static(&cxl_interface_info);
3314     type_register_static(&pcie_interface_info);
3315     type_register_static(&pci_device_type_info);
3316 }
3317 
3318 type_init(pci_register_types)
3319