xref: /openbmc/qemu/hw/pci/pcie.c (revision 4760cedc)
1 /*
2  * pcie.c
3  *
4  * Copyright (c) 2010 Isaku Yamahata <yamahata at valinux co jp>
5  *                    VA Linux Systems Japan K.K.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with this program; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "hw/pci/pci_bridge.h"
24 #include "hw/pci/pcie.h"
25 #include "hw/pci/msix.h"
26 #include "hw/pci/msi.h"
27 #include "hw/pci/pci_bus.h"
28 #include "hw/pci/pcie_regs.h"
29 #include "hw/pci/pcie_port.h"
30 #include "qemu/range.h"
31 #include "trace.h"
32 
33 //#define DEBUG_PCIE
34 #ifdef DEBUG_PCIE
35 # define PCIE_DPRINTF(fmt, ...)                                         \
36     fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__)
37 #else
38 # define PCIE_DPRINTF(fmt, ...) do {} while (0)
39 #endif
40 #define PCIE_DEV_PRINTF(dev, fmt, ...)                                  \
41     PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)
42 
43 static bool pcie_sltctl_powered_off(uint16_t sltctl)
44 {
45     return (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF
46         && (sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF;
47 }
48 
49 static const char *pcie_led_state_to_str(uint16_t value)
50 {
51     switch (value) {
52     case PCI_EXP_SLTCTL_PWR_IND_ON:
53     case PCI_EXP_SLTCTL_ATTN_IND_ON:
54         return "on";
55     case PCI_EXP_SLTCTL_PWR_IND_BLINK:
56     case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
57         return "blink";
58     case PCI_EXP_SLTCTL_PWR_IND_OFF:
59     case PCI_EXP_SLTCTL_ATTN_IND_OFF:
60         return "off";
61     default:
62         return "invalid";
63     }
64 }
65 
66 /***************************************************************************
67  * pci express capability helper functions
68  */
69 
70 static void
71 pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version)
72 {
73     uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
74     uint8_t *cmask = dev->cmask + dev->exp.exp_cap;
75 
76     /* capability register
77     interrupt message number defaults to 0 */
78     pci_set_word(exp_cap + PCI_EXP_FLAGS,
79                  ((type << PCI_EXP_FLAGS_TYPE_SHIFT) & PCI_EXP_FLAGS_TYPE) |
80                  version);
81 
82     /* device capability register
83      * table 7-12:
84      * roll based error reporting bit must be set by all
85      * Functions conforming to the ECN, PCI Express Base
86      * Specification, Revision 1.1., or subsequent PCI Express Base
87      * Specification revisions.
88      */
89     pci_set_long(exp_cap + PCI_EXP_DEVCAP, PCI_EXP_DEVCAP_RBER);
90 
91     pci_set_long(exp_cap + PCI_EXP_LNKCAP,
92                  (port << PCI_EXP_LNKCAP_PN_SHIFT) |
93                  PCI_EXP_LNKCAP_ASPMS_0S |
94                  QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
95                  QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT));
96 
97     pci_set_word(exp_cap + PCI_EXP_LNKSTA,
98                  QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1) |
99                  QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT));
100 
101     /* We changed link status bits over time, and changing them across
102      * migrations is generally fine as hardware changes them too.
103      * Let's not bother checking.
104      */
105     pci_set_word(cmask + PCI_EXP_LNKSTA, 0);
106 }
107 
108 static void pcie_cap_fill_slot_lnk(PCIDevice *dev)
109 {
110     PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT);
111     uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
112 
113     /* Skip anything that isn't a PCIESlot */
114     if (!s) {
115         return;
116     }
117 
118     /* Clear and fill LNKCAP from what was configured above */
119     pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP,
120                                  PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
121     pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
122                                QEMU_PCI_EXP_LNKCAP_MLW(s->width) |
123                                QEMU_PCI_EXP_LNKCAP_MLS(s->speed));
124 
125     /*
126      * Link bandwidth notification is required for all root ports and
127      * downstream ports supporting links wider than x1 or multiple link
128      * speeds.
129      */
130     if (s->width > QEMU_PCI_EXP_LNK_X1 ||
131         s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
132         pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
133                                    PCI_EXP_LNKCAP_LBNC);
134     }
135 
136     if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
137         /*
138          * Hot-plug capable downstream ports and downstream ports supporting
139          * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC
140          * to 1b.  PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which
141          * we also hardwire to 1b here.  2.5GT/s hot-plug slots should also
142          * technically implement this, but it's not done here for compatibility.
143          */
144         pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
145                                    PCI_EXP_LNKCAP_DLLLARC);
146         /* the PCI_EXP_LNKSTA_DLLLA will be set in the hotplug function */
147 
148         /*
149          * Target Link Speed defaults to the highest link speed supported by
150          * the component.  2.5GT/s devices are permitted to hardwire to zero.
151          */
152         pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKCTL2,
153                                      PCI_EXP_LNKCTL2_TLS);
154         pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKCTL2,
155                                    QEMU_PCI_EXP_LNKCAP_MLS(s->speed) &
156                                    PCI_EXP_LNKCTL2_TLS);
157     }
158 
159     /*
160      * 2.5 & 5.0GT/s can be fully described by LNKCAP, but 8.0GT/s is
161      * actually a reference to the highest bit supported in this register.
162      * We assume the device supports all link speeds.
163      */
164     if (s->speed > QEMU_PCI_EXP_LNK_5GT) {
165         pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP2, ~0U);
166         pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
167                                    PCI_EXP_LNKCAP2_SLS_2_5GB |
168                                    PCI_EXP_LNKCAP2_SLS_5_0GB |
169                                    PCI_EXP_LNKCAP2_SLS_8_0GB);
170         if (s->speed > QEMU_PCI_EXP_LNK_8GT) {
171             pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
172                                        PCI_EXP_LNKCAP2_SLS_16_0GB);
173         }
174     }
175 }
176 
177 int pcie_cap_init(PCIDevice *dev, uint8_t offset,
178                   uint8_t type, uint8_t port,
179                   Error **errp)
180 {
181     /* PCIe cap v2 init */
182     int pos;
183     uint8_t *exp_cap;
184 
185     assert(pci_is_express(dev));
186 
187     pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset,
188                              PCI_EXP_VER2_SIZEOF, errp);
189     if (pos < 0) {
190         return pos;
191     }
192     dev->exp.exp_cap = pos;
193     exp_cap = dev->config + pos;
194 
195     /* Filling values common with v1 */
196     pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER2);
197 
198     /* Fill link speed and width options */
199     pcie_cap_fill_slot_lnk(dev);
200 
201     /* Filling v2 specific values */
202     pci_set_long(exp_cap + PCI_EXP_DEVCAP2,
203                  PCI_EXP_DEVCAP2_EFF | PCI_EXP_DEVCAP2_EETLPP);
204 
205     pci_set_word(dev->wmask + pos + PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_EETLPPB);
206 
207     if (dev->cap_present & QEMU_PCIE_EXTCAP_INIT) {
208         /* read-only to behave like a 'NULL' Extended Capability Header */
209         pci_set_long(dev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
210     }
211 
212     return pos;
213 }
214 
215 int pcie_cap_v1_init(PCIDevice *dev, uint8_t offset, uint8_t type,
216                      uint8_t port)
217 {
218     /* PCIe cap v1 init */
219     int pos;
220     Error *local_err = NULL;
221 
222     assert(pci_is_express(dev));
223 
224     pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset,
225                              PCI_EXP_VER1_SIZEOF, &local_err);
226     if (pos < 0) {
227         error_report_err(local_err);
228         return pos;
229     }
230     dev->exp.exp_cap = pos;
231 
232     pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER1);
233 
234     return pos;
235 }
236 
237 static int
238 pcie_endpoint_cap_common_init(PCIDevice *dev, uint8_t offset, uint8_t cap_size)
239 {
240     uint8_t type = PCI_EXP_TYPE_ENDPOINT;
241     Error *local_err = NULL;
242     int ret;
243 
244     /*
245      * Windows guests will report Code 10, device cannot start, if
246      * a regular Endpoint type is exposed on a root complex.  These
247      * should instead be Root Complex Integrated Endpoints.
248      */
249     if (pci_bus_is_express(pci_get_bus(dev))
250         && pci_bus_is_root(pci_get_bus(dev))) {
251         type = PCI_EXP_TYPE_RC_END;
252     }
253 
254     if (cap_size == PCI_EXP_VER1_SIZEOF) {
255         return pcie_cap_v1_init(dev, offset, type, 0);
256     } else {
257         ret = pcie_cap_init(dev, offset, type, 0, &local_err);
258 
259         if (ret < 0) {
260             error_report_err(local_err);
261         }
262 
263         return ret;
264     }
265 }
266 
267 int pcie_endpoint_cap_init(PCIDevice *dev, uint8_t offset)
268 {
269     return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER2_SIZEOF);
270 }
271 
272 int pcie_endpoint_cap_v1_init(PCIDevice *dev, uint8_t offset)
273 {
274     return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER1_SIZEOF);
275 }
276 
277 void pcie_cap_exit(PCIDevice *dev)
278 {
279     pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER2_SIZEOF);
280 }
281 
282 void pcie_cap_v1_exit(PCIDevice *dev)
283 {
284     pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER1_SIZEOF);
285 }
286 
287 uint8_t pcie_cap_get_type(const PCIDevice *dev)
288 {
289     uint32_t pos = dev->exp.exp_cap;
290     assert(pos > 0);
291     return (pci_get_word(dev->config + pos + PCI_EXP_FLAGS) &
292             PCI_EXP_FLAGS_TYPE) >> PCI_EXP_FLAGS_TYPE_SHIFT;
293 }
294 
295 uint8_t pcie_cap_get_version(const PCIDevice *dev)
296 {
297     uint32_t pos = dev->exp.exp_cap;
298     assert(pos > 0);
299     return pci_get_word(dev->config + pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_VERS;
300 }
301 
302 /* MSI/MSI-X */
303 /* pci express interrupt message number */
304 /* 7.8.2 PCI Express Capabilities Register: Interrupt Message Number */
305 void pcie_cap_flags_set_vector(PCIDevice *dev, uint8_t vector)
306 {
307     uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
308     assert(vector < 32);
309     pci_word_test_and_clear_mask(exp_cap + PCI_EXP_FLAGS, PCI_EXP_FLAGS_IRQ);
310     pci_word_test_and_set_mask(exp_cap + PCI_EXP_FLAGS,
311                                vector << PCI_EXP_FLAGS_IRQ_SHIFT);
312 }
313 
314 uint8_t pcie_cap_flags_get_vector(PCIDevice *dev)
315 {
316     return (pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_FLAGS) &
317             PCI_EXP_FLAGS_IRQ) >> PCI_EXP_FLAGS_IRQ_SHIFT;
318 }
319 
320 void pcie_cap_deverr_init(PCIDevice *dev)
321 {
322     uint32_t pos = dev->exp.exp_cap;
323     pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP,
324                                PCI_EXP_DEVCAP_RBER);
325     pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL,
326                                PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
327                                PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
328     pci_long_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_DEVSTA,
329                                PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_NFED |
330                                PCI_EXP_DEVSTA_FED | PCI_EXP_DEVSTA_URD);
331 }
332 
333 void pcie_cap_deverr_reset(PCIDevice *dev)
334 {
335     uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;
336     pci_long_test_and_clear_mask(devctl,
337                                  PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
338                                  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
339 }
340 
341 void pcie_cap_lnkctl_init(PCIDevice *dev)
342 {
343     uint32_t pos = dev->exp.exp_cap;
344     pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_LNKCTL,
345                                PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES);
346 }
347 
348 void pcie_cap_lnkctl_reset(PCIDevice *dev)
349 {
350     uint8_t *lnkctl = dev->config + dev->exp.exp_cap + PCI_EXP_LNKCTL;
351     pci_long_test_and_clear_mask(lnkctl,
352                                  PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES);
353 }
354 
355 static void hotplug_event_update_event_status(PCIDevice *dev)
356 {
357     uint32_t pos = dev->exp.exp_cap;
358     uint8_t *exp_cap = dev->config + pos;
359     uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
360     uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);
361 
362     dev->exp.hpev_notified = (sltctl & PCI_EXP_SLTCTL_HPIE) &&
363         (sltsta & sltctl & PCI_EXP_HP_EV_SUPPORTED);
364 }
365 
366 static void hotplug_event_notify(PCIDevice *dev)
367 {
368     bool prev = dev->exp.hpev_notified;
369 
370     hotplug_event_update_event_status(dev);
371 
372     if (prev == dev->exp.hpev_notified) {
373         return;
374     }
375 
376     /* Note: the logic above does not take into account whether interrupts
377      * are masked. The result is that interrupt will be sent when it is
378      * subsequently unmasked. This appears to be legal: Section 6.7.3.4:
379      * The Port may optionally send an MSI when there are hot-plug events that
380      * occur while interrupt generation is disabled, and interrupt generation is
381      * subsequently enabled. */
382     if (msix_enabled(dev)) {
383         msix_notify(dev, pcie_cap_flags_get_vector(dev));
384     } else if (msi_enabled(dev)) {
385         msi_notify(dev, pcie_cap_flags_get_vector(dev));
386     } else if (pci_intx(dev) != -1) {
387         pci_set_irq(dev, dev->exp.hpev_notified);
388     }
389 }
390 
391 static void hotplug_event_clear(PCIDevice *dev)
392 {
393     hotplug_event_update_event_status(dev);
394     if (!msix_enabled(dev) && !msi_enabled(dev) && pci_intx(dev) != -1 &&
395         !dev->exp.hpev_notified) {
396         pci_irq_deassert(dev);
397     }
398 }
399 
400 void pcie_cap_slot_enable_power(PCIDevice *dev)
401 {
402     uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
403     uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP);
404 
405     if (sltcap & PCI_EXP_SLTCAP_PCP) {
406         pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
407                                      PCI_EXP_SLTCTL_PCC);
408     }
409 }
410 
411 static void pcie_set_power_device(PCIBus *bus, PCIDevice *dev, void *opaque)
412 {
413     bool *power = opaque;
414 
415     pci_set_power(dev, *power);
416 }
417 
418 static void pcie_cap_update_power(PCIDevice *hotplug_dev)
419 {
420     uint8_t *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap;
421     PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(hotplug_dev));
422     uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP);
423     uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
424     bool power = true;
425 
426     if (sltcap & PCI_EXP_SLTCAP_PCP) {
427         power = (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_ON;
428         /* Don't we need to check also (sltctl & PCI_EXP_SLTCTL_PIC) ? */
429     }
430 
431     pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
432                         pcie_set_power_device, &power);
433 }
434 
435 /*
436  * A PCI Express Hot-Plug Event has occurred, so update slot status register
437  * and notify OS of the event if necessary.
438  *
439  * 6.7.3 PCI Express Hot-Plug Events
440  * 6.7.3.4 Software Notification of Hot-Plug Events
441  */
442 static void pcie_cap_slot_event(PCIDevice *dev, PCIExpressHotPlugEvent event)
443 {
444     /* Minor optimization: if nothing changed - no event is needed. */
445     if (pci_word_test_and_set_mask(dev->config + dev->exp.exp_cap +
446                                    PCI_EXP_SLTSTA, event) == event) {
447         return;
448     }
449     hotplug_event_notify(dev);
450 }
451 
452 static void pcie_cap_slot_plug_common(PCIDevice *hotplug_dev, DeviceState *dev,
453                                       Error **errp)
454 {
455     uint8_t *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap;
456     uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);
457 
458     PCIE_DEV_PRINTF(PCI_DEVICE(dev), "hotplug state: 0x%x\n", sltsta);
459     if (sltsta & PCI_EXP_SLTSTA_EIS) {
460         /* the slot is electromechanically locked.
461          * This error is propagated up to qdev and then to HMP/QMP.
462          */
463         error_setg_errno(errp, EBUSY, "slot is electromechanically locked");
464     }
465 }
466 
467 void pcie_cap_slot_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
468                                Error **errp)
469 {
470     PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
471     uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
472     uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP);
473 
474     /* Check if hot-plug is disabled on the slot */
475     if (dev->hotplugged && (sltcap & PCI_EXP_SLTCAP_HPC) == 0) {
476         error_setg(errp, "Hot-plug failed: unsupported by the port device '%s'",
477                          DEVICE(hotplug_pdev)->id);
478         return;
479     }
480 
481     pcie_cap_slot_plug_common(PCI_DEVICE(hotplug_dev), dev, errp);
482 }
483 
484 void pcie_cap_slot_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
485                            Error **errp)
486 {
487     PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
488     uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
489     PCIDevice *pci_dev = PCI_DEVICE(dev);
490     uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP);
491 
492     if (pci_is_vf(pci_dev)) {
493         /* Virtual function cannot be physically disconnected */
494         return;
495     }
496 
497     /* Don't send event when device is enabled during qemu machine creation:
498      * it is present on boot, no hotplug event is necessary. We do send an
499      * event when the device is disabled later. */
500     if (!dev->hotplugged) {
501         pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
502                                    PCI_EXP_SLTSTA_PDS);
503         if (pci_dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
504             (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
505             pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
506                                        PCI_EXP_LNKSTA_DLLLA);
507         }
508         pcie_cap_update_power(hotplug_pdev);
509         return;
510     }
511 
512     /* To enable multifunction hot-plug, we just ensure the function
513      * 0 added last. When function 0 is added, we set the sltsta and
514      * inform OS via event notification.
515      */
516     if (pci_get_function_0(pci_dev)) {
517         pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
518                                    PCI_EXP_SLTSTA_PDS);
519         if (pci_dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
520             (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
521             pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
522                                        PCI_EXP_LNKSTA_DLLLA);
523         }
524         pcie_cap_slot_event(hotplug_pdev,
525                             PCI_EXP_HP_EV_PDC | PCI_EXP_HP_EV_ABP);
526         pcie_cap_update_power(hotplug_pdev);
527     }
528 }
529 
530 void pcie_cap_slot_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
531                              Error **errp)
532 {
533     qdev_unrealize(dev);
534 }
535 
536 static void pcie_unplug_device(PCIBus *bus, PCIDevice *dev, void *opaque)
537 {
538     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(dev));
539 
540     if (dev->partially_hotplugged) {
541         dev->qdev.pending_deleted_event = false;
542         return;
543     }
544     hotplug_handler_unplug(hotplug_ctrl, DEVICE(dev), &error_abort);
545     object_unparent(OBJECT(dev));
546 }
547 
548 static void pcie_cap_slot_do_unplug(PCIDevice *dev)
549 {
550     PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
551     uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
552     uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP);
553 
554     pci_for_each_device_under_bus(sec_bus, pcie_unplug_device, NULL);
555 
556     pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
557                                  PCI_EXP_SLTSTA_PDS);
558     if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
559         (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
560         pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
561                                      PCI_EXP_LNKSTA_DLLLA);
562     }
563     pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
564                                PCI_EXP_SLTSTA_PDC);
565 }
566 
567 void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev,
568                                      DeviceState *dev, Error **errp)
569 {
570     Error *local_err = NULL;
571     PCIDevice *pci_dev = PCI_DEVICE(dev);
572     PCIBus *bus = pci_get_bus(pci_dev);
573     PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
574     uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
575     uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP);
576     uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
577 
578     /* Check if hot-unplug is disabled on the slot */
579     if ((sltcap & PCI_EXP_SLTCAP_HPC) == 0) {
580         error_setg(errp, "Hot-unplug failed: "
581                          "unsupported by the port device '%s'",
582                          DEVICE(hotplug_pdev)->id);
583         return;
584     }
585 
586     pcie_cap_slot_plug_common(hotplug_pdev, dev, &local_err);
587     if (local_err) {
588         error_propagate(errp, local_err);
589         return;
590     }
591 
592     if ((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_BLINK) {
593         error_setg(errp, "Hot-unplug failed: "
594                    "guest is busy (power indicator blinking)");
595         return;
596     }
597 
598     dev->pending_deleted_event = true;
599     dev->pending_deleted_expires_ms =
600         qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 5000; /* 5 secs */
601 
602     /* In case user cancel the operation of multi-function hot-add,
603      * remove the function that is unexposed to guest individually,
604      * without interaction with guest.
605      */
606     if (pci_dev->devfn &&
607         !bus->devices[0]) {
608         pcie_unplug_device(bus, pci_dev, NULL);
609 
610         return;
611     }
612 
613     if (pcie_sltctl_powered_off(sltctl)) {
614         /* slot is powered off -> unplug without round-trip to the guest */
615         pcie_cap_slot_do_unplug(hotplug_pdev);
616         hotplug_event_notify(hotplug_pdev);
617         pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
618                                      PCI_EXP_SLTSTA_ABP);
619         return;
620     }
621 
622     pcie_cap_slot_push_attention_button(hotplug_pdev);
623 }
624 
625 /* pci express slot for pci express root/downstream port
626    PCI express capability slot registers */
627 void pcie_cap_slot_init(PCIDevice *dev, PCIESlot *s)
628 {
629     uint32_t pos = dev->exp.exp_cap;
630 
631     pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_FLAGS,
632                                PCI_EXP_FLAGS_SLOT);
633 
634     pci_long_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCAP,
635                                  ~PCI_EXP_SLTCAP_PSN);
636     pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
637                                (s->slot << PCI_EXP_SLTCAP_PSN_SHIFT) |
638                                PCI_EXP_SLTCAP_EIP |
639                                PCI_EXP_SLTCAP_PIP |
640                                PCI_EXP_SLTCAP_AIP |
641                                PCI_EXP_SLTCAP_ABP);
642 
643     /*
644      * Expose native hot-plug on all bridges if hot-plug is enabled on the slot.
645      * (unless broken 6.1 ABI is enforced for compat reasons)
646      */
647     if (s->hotplug &&
648         (!s->hide_native_hotplug_cap || DEVICE(dev)->hotplugged)) {
649         pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
650                                    PCI_EXP_SLTCAP_HPS |
651                                    PCI_EXP_SLTCAP_HPC);
652     }
653 
654     if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
655         pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
656                                    PCI_EXP_SLTCAP_PCP);
657         pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
658                                      PCI_EXP_SLTCTL_PCC);
659         pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
660                                    PCI_EXP_SLTCTL_PCC);
661     }
662 
663     pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
664                                  PCI_EXP_SLTCTL_PIC |
665                                  PCI_EXP_SLTCTL_AIC);
666     pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCTL,
667                                PCI_EXP_SLTCTL_PWR_IND_OFF |
668                                PCI_EXP_SLTCTL_ATTN_IND_OFF);
669     pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
670                                PCI_EXP_SLTCTL_PIC |
671                                PCI_EXP_SLTCTL_AIC |
672                                PCI_EXP_SLTCTL_HPIE |
673                                PCI_EXP_SLTCTL_CCIE |
674                                PCI_EXP_SLTCTL_PDCE |
675                                PCI_EXP_SLTCTL_ABPE);
676     /* Although reading PCI_EXP_SLTCTL_EIC returns always 0,
677      * make the bit writable here in order to detect 1b is written.
678      * pcie_cap_slot_write_config() test-and-clear the bit, so
679      * this bit always returns 0 to the guest.
680      */
681     pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
682                                PCI_EXP_SLTCTL_EIC);
683 
684     pci_word_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_SLTSTA,
685                                PCI_EXP_HP_EV_SUPPORTED);
686 
687     /* Avoid migration abortion when this device hot-removed by guest */
688     pci_word_test_and_clear_mask(dev->cmask + pos + PCI_EXP_SLTSTA,
689                                  PCI_EXP_SLTSTA_PDS);
690 
691     dev->exp.hpev_notified = false;
692 
693     qbus_set_hotplug_handler(BUS(pci_bridge_get_sec_bus(PCI_BRIDGE(dev))),
694                              OBJECT(dev));
695 }
696 
697 void pcie_cap_slot_reset(PCIDevice *dev)
698 {
699     uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
700     uint8_t port_type = pcie_cap_get_type(dev);
701 
702     assert(port_type == PCI_EXP_TYPE_DOWNSTREAM ||
703            port_type == PCI_EXP_TYPE_ROOT_PORT);
704 
705     PCIE_DEV_PRINTF(dev, "reset\n");
706 
707     pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
708                                  PCI_EXP_SLTCTL_EIC |
709                                  PCI_EXP_SLTCTL_PIC |
710                                  PCI_EXP_SLTCTL_AIC |
711                                  PCI_EXP_SLTCTL_HPIE |
712                                  PCI_EXP_SLTCTL_CCIE |
713                                  PCI_EXP_SLTCTL_PDCE |
714                                  PCI_EXP_SLTCTL_ABPE);
715     pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
716                                PCI_EXP_SLTCTL_PWR_IND_OFF |
717                                PCI_EXP_SLTCTL_ATTN_IND_OFF);
718 
719     if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
720         /* Downstream ports enforce device number 0. */
721         bool populated = pci_bridge_get_sec_bus(PCI_BRIDGE(dev))->devices[0];
722         uint16_t pic;
723 
724         if (populated) {
725             pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
726                                          PCI_EXP_SLTCTL_PCC);
727         } else {
728             pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
729                                        PCI_EXP_SLTCTL_PCC);
730         }
731 
732         pic = populated ?
733                 PCI_EXP_SLTCTL_PWR_IND_ON : PCI_EXP_SLTCTL_PWR_IND_OFF;
734         pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic);
735     }
736 
737     pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
738                                  PCI_EXP_SLTSTA_EIS |/* on reset,
739                                                         the lock is released */
740                                  PCI_EXP_SLTSTA_CC |
741                                  PCI_EXP_SLTSTA_PDC |
742                                  PCI_EXP_SLTSTA_ABP);
743 
744     pcie_cap_update_power(dev);
745     hotplug_event_update_event_status(dev);
746 }
747 
748 void pcie_cap_slot_get(PCIDevice *dev, uint16_t *slt_ctl, uint16_t *slt_sta)
749 {
750     uint32_t pos = dev->exp.exp_cap;
751     uint8_t *exp_cap = dev->config + pos;
752     *slt_ctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
753     *slt_sta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);
754 }
755 
756 static void find_child_fn(PCIBus *bus, PCIDevice *dev, void *opaque)
757 {
758     PCIDevice **child = opaque;
759 
760     if (!*child) {
761         *child = dev;
762     }
763 }
764 
765 /*
766  * Returns the plugged device or first function of multifunction plugged device
767  */
768 static PCIDevice *pcie_cap_slot_find_child(PCIDevice *dev)
769 {
770     PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
771     PCIDevice *child = NULL;
772 
773     pci_for_each_device(sec_bus, pci_bus_num(sec_bus), find_child_fn, &child);
774 
775     return child;
776 }
777 
778 void pcie_cap_slot_write_config(PCIDevice *dev,
779                                 uint16_t old_slt_ctl, uint16_t old_slt_sta,
780                                 uint32_t addr, uint32_t val, int len)
781 {
782     uint32_t pos = dev->exp.exp_cap;
783     uint8_t *exp_cap = dev->config + pos;
784     uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);
785 
786     if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) {
787         /*
788          * Guests tend to clears all bits during init.
789          * If they clear bits that weren't set this is racy and will lose events:
790          * not a big problem for manual button presses, but a problem for us.
791          * As a work-around, detect this and revert status to what it was
792          * before the write.
793          *
794          * Note: in theory this can be detected as a duplicate button press
795          * which cancels the previous press. Does not seem to happen in
796          * practice as guests seem to only have this bug during init.
797          */
798 #define PCIE_SLOT_EVENTS (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | \
799                           PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | \
800                           PCI_EXP_SLTSTA_CC)
801 
802         if (val & ~old_slt_sta & PCIE_SLOT_EVENTS) {
803             sltsta = (sltsta & ~PCIE_SLOT_EVENTS) | (old_slt_sta & PCIE_SLOT_EVENTS);
804             pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
805         }
806         hotplug_event_clear(dev);
807     }
808 
809     if (!ranges_overlap(addr, len, pos + PCI_EXP_SLTCTL, 2)) {
810         return;
811     }
812 
813     if (pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
814                                      PCI_EXP_SLTCTL_EIC)) {
815         sltsta ^= PCI_EXP_SLTSTA_EIS; /* toggle PCI_EXP_SLTSTA_EIS bit */
816         pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
817         PCIE_DEV_PRINTF(dev, "PCI_EXP_SLTCTL_EIC: "
818                         "sltsta -> 0x%02"PRIx16"\n",
819                         sltsta);
820     }
821 
822     if (trace_event_get_state_backends(TRACE_PCIE_CAP_SLOT_WRITE_CONFIG)) {
823         DeviceState *parent = DEVICE(dev);
824         DeviceState *child = DEVICE(pcie_cap_slot_find_child(dev));
825 
826         trace_pcie_cap_slot_write_config(
827             parent->canonical_path,
828             child ? child->canonical_path : "no-child",
829             (sltsta & PCI_EXP_SLTSTA_PDS) ? "present" : "not present",
830             pcie_led_state_to_str(old_slt_ctl & PCI_EXP_SLTCTL_PIC),
831             pcie_led_state_to_str(val & PCI_EXP_SLTCTL_PIC),
832             pcie_led_state_to_str(old_slt_ctl & PCI_EXP_SLTCTL_AIC),
833             pcie_led_state_to_str(val & PCI_EXP_SLTCTL_AIC),
834             (old_slt_ctl & PCI_EXP_SLTCTL_PWR_OFF) ? "off" : "on",
835             (val & PCI_EXP_SLTCTL_PWR_OFF) ? "off" : "on");
836     }
837 
838     /*
839      * If the slot is populated, power indicator is off and power
840      * controller is off, it is safe to detach the devices.
841      *
842      * Note: don't detach if condition was already true:
843      * this is a work around for guests that overwrite
844      * control of powered off slots before powering them on.
845      */
846     if ((sltsta & PCI_EXP_SLTSTA_PDS) && pcie_sltctl_powered_off(val) &&
847         !pcie_sltctl_powered_off(old_slt_ctl))
848     {
849         pcie_cap_slot_do_unplug(dev);
850     }
851     pcie_cap_update_power(dev);
852 
853     hotplug_event_notify(dev);
854 
855     /*
856      * 6.7.3.2 Command Completed Events
857      *
858      * Software issues a command to a hot-plug capable Downstream Port by
859      * issuing a write transaction that targets any portion of the Port’s Slot
860      * Control register. A single write to the Slot Control register is
861      * considered to be a single command, even if the write affects more than
862      * one field in the Slot Control register. In response to this transaction,
863      * the Port must carry out the requested actions and then set the
864      * associated status field for the command completed event. */
865 
866     /* Real hardware might take a while to complete requested command because
867      * physical movement would be involved like locking the electromechanical
868      * lock.  However in our case, command is completed instantaneously above,
869      * so send a command completion event right now.
870      */
871     pcie_cap_slot_event(dev, PCI_EXP_HP_EV_CCI);
872 }
873 
874 int pcie_cap_slot_post_load(void *opaque, int version_id)
875 {
876     PCIDevice *dev = opaque;
877     hotplug_event_update_event_status(dev);
878     pcie_cap_update_power(dev);
879     return 0;
880 }
881 
882 void pcie_cap_slot_push_attention_button(PCIDevice *dev)
883 {
884     pcie_cap_slot_event(dev, PCI_EXP_HP_EV_ABP);
885 }
886 
887 /* root control/capabilities/status. PME isn't emulated for now */
888 void pcie_cap_root_init(PCIDevice *dev)
889 {
890     pci_set_word(dev->wmask + dev->exp.exp_cap + PCI_EXP_RTCTL,
891                  PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
892                  PCI_EXP_RTCTL_SEFEE);
893 }
894 
895 void pcie_cap_root_reset(PCIDevice *dev)
896 {
897     pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_RTCTL, 0);
898 }
899 
900 /* function level reset(FLR) */
901 void pcie_cap_flr_init(PCIDevice *dev)
902 {
903     pci_long_test_and_set_mask(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCAP,
904                                PCI_EXP_DEVCAP_FLR);
905 
906     /* Although reading BCR_FLR returns always 0,
907      * the bit is made writable here in order to detect the 1b is written
908      * pcie_cap_flr_write_config() test-and-clear the bit, so
909      * this bit always returns 0 to the guest.
910      */
911     pci_word_test_and_set_mask(dev->wmask + dev->exp.exp_cap + PCI_EXP_DEVCTL,
912                                PCI_EXP_DEVCTL_BCR_FLR);
913 }
914 
915 void pcie_cap_flr_write_config(PCIDevice *dev,
916                                uint32_t addr, uint32_t val, int len)
917 {
918     uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;
919     if (pci_get_word(devctl) & PCI_EXP_DEVCTL_BCR_FLR) {
920         /* Clear PCI_EXP_DEVCTL_BCR_FLR after invoking the reset handler
921            so the handler can detect FLR by looking at this bit. */
922         pci_device_reset(dev);
923         pci_word_test_and_clear_mask(devctl, PCI_EXP_DEVCTL_BCR_FLR);
924     }
925 }
926 
927 /* Alternative Routing-ID Interpretation (ARI)
928  * forwarding support for root and downstream ports
929  */
930 void pcie_cap_arifwd_init(PCIDevice *dev)
931 {
932     uint32_t pos = dev->exp.exp_cap;
933     pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP2,
934                                PCI_EXP_DEVCAP2_ARI);
935     pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL2,
936                                PCI_EXP_DEVCTL2_ARI);
937 }
938 
939 void pcie_cap_arifwd_reset(PCIDevice *dev)
940 {
941     uint8_t *devctl2 = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2;
942     pci_long_test_and_clear_mask(devctl2, PCI_EXP_DEVCTL2_ARI);
943 }
944 
945 bool pcie_cap_is_arifwd_enabled(const PCIDevice *dev)
946 {
947     if (!pci_is_express(dev)) {
948         return false;
949     }
950     if (!dev->exp.exp_cap) {
951         return false;
952     }
953 
954     return pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) &
955         PCI_EXP_DEVCTL2_ARI;
956 }
957 
958 /**************************************************************************
959  * pci express extended capability list management functions
960  * uint16_t ext_cap_id (16 bit)
961  * uint8_t cap_ver (4 bit)
962  * uint16_t cap_offset (12 bit)
963  * uint16_t ext_cap_size
964  */
965 
966 /* Passing a cap_id value > 0xffff will return 0 and put end of list in prev */
967 static uint16_t pcie_find_capability_list(PCIDevice *dev, uint32_t cap_id,
968                                           uint16_t *prev_p)
969 {
970     uint16_t prev = 0;
971     uint16_t next;
972     uint32_t header = pci_get_long(dev->config + PCI_CONFIG_SPACE_SIZE);
973 
974     if (!header) {
975         /* no extended capability */
976         next = 0;
977         goto out;
978     }
979     for (next = PCI_CONFIG_SPACE_SIZE; next;
980          prev = next, next = PCI_EXT_CAP_NEXT(header)) {
981 
982         assert(next >= PCI_CONFIG_SPACE_SIZE);
983         assert(next <= PCIE_CONFIG_SPACE_SIZE - 8);
984 
985         header = pci_get_long(dev->config + next);
986         if (PCI_EXT_CAP_ID(header) == cap_id) {
987             break;
988         }
989     }
990 
991 out:
992     if (prev_p) {
993         *prev_p = prev;
994     }
995     return next;
996 }
997 
998 uint16_t pcie_find_capability(PCIDevice *dev, uint16_t cap_id)
999 {
1000     return pcie_find_capability_list(dev, cap_id, NULL);
1001 }
1002 
1003 static void pcie_ext_cap_set_next(PCIDevice *dev, uint16_t pos, uint16_t next)
1004 {
1005     uint32_t header = pci_get_long(dev->config + pos);
1006     assert(!(next & (PCI_EXT_CAP_ALIGN - 1)));
1007     header = (header & ~PCI_EXT_CAP_NEXT_MASK) |
1008         ((next << PCI_EXT_CAP_NEXT_SHIFT) & PCI_EXT_CAP_NEXT_MASK);
1009     pci_set_long(dev->config + pos, header);
1010 }
1011 
1012 /*
1013  * Caller must supply valid (offset, size) such that the range wouldn't
1014  * overlap with other capability or other registers.
1015  * This function doesn't check it.
1016  */
1017 void pcie_add_capability(PCIDevice *dev,
1018                          uint16_t cap_id, uint8_t cap_ver,
1019                          uint16_t offset, uint16_t size)
1020 {
1021     assert(offset >= PCI_CONFIG_SPACE_SIZE);
1022     assert(offset < (uint16_t)(offset + size));
1023     assert((uint16_t)(offset + size) <= PCIE_CONFIG_SPACE_SIZE);
1024     assert(size >= 8);
1025     assert(pci_is_express(dev));
1026 
1027     if (offset != PCI_CONFIG_SPACE_SIZE) {
1028         uint16_t prev;
1029 
1030         /*
1031          * 0xffffffff is not a valid cap id (it's a 16 bit field). use
1032          * internally to find the last capability in the linked list.
1033          */
1034         pcie_find_capability_list(dev, 0xffffffff, &prev);
1035         assert(prev >= PCI_CONFIG_SPACE_SIZE);
1036         pcie_ext_cap_set_next(dev, prev, offset);
1037     }
1038     pci_set_long(dev->config + offset, PCI_EXT_CAP(cap_id, cap_ver, 0));
1039 
1040     /* Make capability read-only by default */
1041     memset(dev->wmask + offset, 0, size);
1042     memset(dev->w1cmask + offset, 0, size);
1043     /* Check capability by default */
1044     memset(dev->cmask + offset, 0xFF, size);
1045 }
1046 
1047 /*
1048  * Sync the PCIe Link Status negotiated speed and width of a bridge with the
1049  * downstream device.  If downstream device is not present, re-write with the
1050  * Link Capability fields.  If downstream device reports invalid width or
1051  * speed, replace with minimum values (LnkSta fields are RsvdZ on VFs but such
1052  * values interfere with PCIe native hotplug detecting new devices).  Limit
1053  * width and speed to bridge capabilities for compatibility.  Use config_read
1054  * to access the downstream device since it could be an assigned device with
1055  * volatile link information.
1056  */
1057 void pcie_sync_bridge_lnk(PCIDevice *bridge_dev)
1058 {
1059     PCIBridge *br = PCI_BRIDGE(bridge_dev);
1060     PCIBus *bus = pci_bridge_get_sec_bus(br);
1061     PCIDevice *target = bus->devices[0];
1062     uint8_t *exp_cap = bridge_dev->config + bridge_dev->exp.exp_cap;
1063     uint16_t lnksta, lnkcap = pci_get_word(exp_cap + PCI_EXP_LNKCAP);
1064 
1065     if (!target || !target->exp.exp_cap) {
1066         lnksta = lnkcap;
1067     } else {
1068         lnksta = target->config_read(target,
1069                                      target->exp.exp_cap + PCI_EXP_LNKSTA,
1070                                      sizeof(lnksta));
1071 
1072         if ((lnksta & PCI_EXP_LNKSTA_NLW) > (lnkcap & PCI_EXP_LNKCAP_MLW)) {
1073             lnksta &= ~PCI_EXP_LNKSTA_NLW;
1074             lnksta |= lnkcap & PCI_EXP_LNKCAP_MLW;
1075         } else if (!(lnksta & PCI_EXP_LNKSTA_NLW)) {
1076             lnksta |= QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1);
1077         }
1078 
1079         if ((lnksta & PCI_EXP_LNKSTA_CLS) > (lnkcap & PCI_EXP_LNKCAP_SLS)) {
1080             lnksta &= ~PCI_EXP_LNKSTA_CLS;
1081             lnksta |= lnkcap & PCI_EXP_LNKCAP_SLS;
1082         } else if (!(lnksta & PCI_EXP_LNKSTA_CLS)) {
1083             lnksta |= QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT);
1084         }
1085     }
1086 
1087     pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
1088                                  PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
1089     pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, lnksta &
1090                                (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW));
1091 }
1092 
1093 /**************************************************************************
1094  * pci express extended capability helper functions
1095  */
1096 
1097 /* ARI */
1098 void pcie_ari_init(PCIDevice *dev, uint16_t offset)
1099 {
1100     uint16_t nextfn = dev->cap_present & QEMU_PCIE_ARI_NEXTFN_1 ? 1 : 0;
1101 
1102     pcie_add_capability(dev, PCI_EXT_CAP_ID_ARI, PCI_ARI_VER,
1103                         offset, PCI_ARI_SIZEOF);
1104     pci_set_long(dev->config + offset + PCI_ARI_CAP, (nextfn & 0xff) << 8);
1105 }
1106 
1107 void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num)
1108 {
1109     static const int pci_dsn_ver = 1;
1110     static const int pci_dsn_cap = 4;
1111 
1112     pcie_add_capability(dev, PCI_EXT_CAP_ID_DSN, pci_dsn_ver, offset,
1113                         PCI_EXT_CAP_DSN_SIZEOF);
1114     pci_set_quad(dev->config + offset + pci_dsn_cap, ser_num);
1115 }
1116 
1117 void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned)
1118 {
1119     pcie_add_capability(dev, PCI_EXT_CAP_ID_ATS, 0x1,
1120                         offset, PCI_EXT_CAP_ATS_SIZEOF);
1121 
1122     dev->exp.ats_cap = offset;
1123 
1124     /* Invalidate Queue Depth 0 */
1125     if (aligned) {
1126         pci_set_word(dev->config + offset + PCI_ATS_CAP,
1127                      PCI_ATS_CAP_PAGE_ALIGNED);
1128     }
1129     /* STU 0, Disabled by default */
1130     pci_set_word(dev->config + offset + PCI_ATS_CTRL, 0);
1131 
1132     pci_set_word(dev->wmask + dev->exp.ats_cap + PCI_ATS_CTRL, 0x800f);
1133 }
1134 
1135 /* ACS (Access Control Services) */
1136 void pcie_acs_init(PCIDevice *dev, uint16_t offset)
1137 {
1138     bool is_downstream = pci_is_express_downstream_port(dev);
1139     uint16_t cap_bits = 0;
1140 
1141     /* For endpoints, only multifunction devs may have an ACS capability: */
1142     assert(is_downstream ||
1143            (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) ||
1144            PCI_FUNC(dev->devfn));
1145 
1146     pcie_add_capability(dev, PCI_EXT_CAP_ID_ACS, PCI_ACS_VER, offset,
1147                         PCI_ACS_SIZEOF);
1148     dev->exp.acs_cap = offset;
1149 
1150     if (is_downstream) {
1151         /*
1152          * Downstream ports must implement SV, TB, RR, CR, UF, and DT (with
1153          * caveats on the latter four that we ignore for simplicity).
1154          * Endpoints may also implement a subset of ACS capabilities,
1155          * but these are optional if the endpoint does not support
1156          * peer-to-peer between functions and thus omitted here.
1157          */
1158         cap_bits = PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
1159             PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT;
1160     }
1161 
1162     pci_set_word(dev->config + offset + PCI_ACS_CAP, cap_bits);
1163     pci_set_word(dev->wmask + offset + PCI_ACS_CTRL, cap_bits);
1164 }
1165 
1166 void pcie_acs_reset(PCIDevice *dev)
1167 {
1168     if (dev->exp.acs_cap) {
1169         pci_set_word(dev->config + dev->exp.acs_cap + PCI_ACS_CTRL, 0);
1170     }
1171 }
1172