1 /*
2 * pcie.c
3 *
4 * Copyright (c) 2010 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "hw/pci/pci_bridge.h"
24 #include "hw/pci/pcie.h"
25 #include "hw/pci/msix.h"
26 #include "hw/pci/msi.h"
27 #include "hw/pci/pci_bus.h"
28 #include "hw/pci/pcie_regs.h"
29 #include "hw/pci/pcie_port.h"
30 #include "qemu/range.h"
31 #include "trace.h"
32
33 //#define DEBUG_PCIE
34 #ifdef DEBUG_PCIE
35 # define PCIE_DPRINTF(fmt, ...) \
36 fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__)
37 #else
38 # define PCIE_DPRINTF(fmt, ...) do {} while (0)
39 #endif
40 #define PCIE_DEV_PRINTF(dev, fmt, ...) \
41 PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)
42
pcie_sltctl_powered_off(uint16_t sltctl)43 static bool pcie_sltctl_powered_off(uint16_t sltctl)
44 {
45 return (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF
46 && (sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF;
47 }
48
pcie_led_state_to_str(uint16_t value)49 static const char *pcie_led_state_to_str(uint16_t value)
50 {
51 switch (value) {
52 case PCI_EXP_SLTCTL_PWR_IND_ON:
53 case PCI_EXP_SLTCTL_ATTN_IND_ON:
54 return "on";
55 case PCI_EXP_SLTCTL_PWR_IND_BLINK:
56 case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
57 return "blink";
58 case PCI_EXP_SLTCTL_PWR_IND_OFF:
59 case PCI_EXP_SLTCTL_ATTN_IND_OFF:
60 return "off";
61 default:
62 return "invalid";
63 }
64 }
65
66 /***************************************************************************
67 * pci express capability helper functions
68 */
69
/*
 * Fill the Express capability fields shared by all capability versions:
 * flags (device/port type, version), device capabilities, and the
 * default link capabilities/status (x1 width at 2.5GT/s).
 * @port is the port number exposed in LNKCAP.
 */
static void
pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint8_t *cmask = dev->cmask + dev->exp.exp_cap;

    /* capability register
       interrupt message number defaults to 0 */
    pci_set_word(exp_cap + PCI_EXP_FLAGS,
                 ((type << PCI_EXP_FLAGS_TYPE_SHIFT) & PCI_EXP_FLAGS_TYPE) |
                 version);

    /* device capability register
     * table 7-12:
     * the role-based error reporting bit must be set by all
     * Functions conforming to the ECN, PCI Express Base
     * Specification, Revision 1.1., or subsequent PCI Express Base
     * Specification revisions.
     */
    uint32_t devcap = PCI_EXP_DEVCAP_RBER;

    /* Optionally advertise Extended Tag Field support as well */
    if (dev->cap_present & QEMU_PCIE_EXT_TAG) {
        devcap = PCI_EXP_DEVCAP_RBER | PCI_EXP_DEVCAP_EXT_TAG;
    }

    pci_set_long(exp_cap + PCI_EXP_DEVCAP, devcap);

    /* Default link capabilities: port number, L0s ASPM, x1 at 2.5GT/s */
    pci_set_long(exp_cap + PCI_EXP_LNKCAP,
                 (port << PCI_EXP_LNKCAP_PN_SHIFT) |
                 PCI_EXP_LNKCAP_ASPMS_0S |
                 QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
                 QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT));

    /* Current link status matches the default capabilities above */
    pci_set_word(exp_cap + PCI_EXP_LNKSTA,
                 QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1) |
                 QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT));

    /* We changed link status bits over time, and changing them across
     * migrations is generally fine as hardware changes them too.
     * Let's not bother checking.
     */
    pci_set_word(cmask + PCI_EXP_LNKSTA, 0);
}
113
114 /* Includes setting the target speed default */
/*
 * Program LNKCAP (and, where applicable, LNKCTL2/LNKCAP2) for the given
 * maximum link @width and @speed.  Includes setting the target speed
 * default.
 */
static void pcie_cap_fill_lnk(uint8_t *exp_cap, PCIExpLinkWidth width,
                              PCIExpLinkSpeed speed)
{
    /* Clear and fill LNKCAP from what was configured above */
    pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP,
                                 PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
                               QEMU_PCI_EXP_LNKCAP_MLW(width) |
                               QEMU_PCI_EXP_LNKCAP_MLS(speed));

    if (speed > QEMU_PCI_EXP_LNK_2_5GT) {
        /*
         * Target Link Speed defaults to the highest link speed supported by
         * the component.  2.5GT/s devices are permitted to hardwire to zero.
         */
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKCTL2,
                                     PCI_EXP_LNKCTL2_TLS);
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKCTL2,
                                   QEMU_PCI_EXP_LNKCAP_MLS(speed) &
                                   PCI_EXP_LNKCTL2_TLS);
    }

    /*
     * 2.5 & 5.0GT/s can be fully described by LNKCAP, but 8.0GT/s is
     * actually a reference to the highest bit supported in this register.
     * We assume the device supports all link speeds up to its maximum.
     */
    if (speed > QEMU_PCI_EXP_LNK_5GT) {
        /* Rebuild LNKCAP2 from scratch with every speed up to @speed */
        pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP2, ~0U);
        pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                   PCI_EXP_LNKCAP2_SLS_2_5GB |
                                   PCI_EXP_LNKCAP2_SLS_5_0GB |
                                   PCI_EXP_LNKCAP2_SLS_8_0GB);
        if (speed > QEMU_PCI_EXP_LNK_8GT) {
            pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                       PCI_EXP_LNKCAP2_SLS_16_0GB);
        }
        if (speed > QEMU_PCI_EXP_LNK_16GT) {
            pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                       PCI_EXP_LNKCAP2_SLS_32_0GB);
        }
        if (speed > QEMU_PCI_EXP_LNK_32GT) {
            pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                       PCI_EXP_LNKCAP2_SLS_64_0GB);
        }
    }
}
162
/*
 * Configure link width/speed for an endpoint or upstream port.
 * These device types report the negotiated link state in LNKSTA, so
 * the current status is updated in addition to the capabilities.
 */
void pcie_cap_fill_link_ep_usp(PCIDevice *dev, PCIExpLinkWidth width,
                               PCIExpLinkSpeed speed)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;

    pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
                                 PCI_EXP_LNKSTA_NLW | PCI_EXP_LNKSTA_CLS);
    pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
                               QEMU_PCI_EXP_LNKSTA_CLS(speed) |
                               QEMU_PCI_EXP_LNKSTA_NLW(width));

    pcie_cap_fill_lnk(exp_cap, width, speed);
}
180
/*
 * Configure link-related capability bits from a PCIESlot's width/speed
 * properties.  Devices that are not PCIESlot instances are left alone.
 */
static void pcie_cap_fill_slot_lnk(PCIDevice *dev)
{
    PCIESlot *slot = (PCIESlot *)object_dynamic_cast(OBJECT(dev),
                                                     TYPE_PCIE_SLOT);
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;

    /* Only PCIESlot devices carry configurable link properties */
    if (!slot) {
        return;
    }

    /*
     * Link bandwidth notification is required for all root ports and
     * downstream ports supporting links wider than x1 or multiple link
     * speeds.
     */
    if (slot->width > QEMU_PCI_EXP_LNK_X1 ||
        slot->speed > QEMU_PCI_EXP_LNK_2_5GT) {
        pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNKCAP_LBNC);
    }

    if (slot->speed > QEMU_PCI_EXP_LNK_2_5GT) {
        /*
         * Hot-plug capable downstream ports and downstream ports supporting
         * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC
         * to 1b.  PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which
         * we also hardwire to 1b here.  2.5GT/s hot-plug slots should also
         * technically implement this, but it's not done here for
         * compatibility.
         */
        pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNKCAP_DLLLARC);
        /* PCI_EXP_LNKSTA_DLLLA itself is set in the hotplug path */
    }

    pcie_cap_fill_lnk(exp_cap, slot->width, slot->speed);
}
217
/*
 * Add a v2 PCI Express capability at config space @offset.
 * @type is the device/port type, @port the port number for LNKCAP.
 * Returns the capability offset on success, a negative value on failure
 * (with @errp set).
 */
int pcie_cap_init(PCIDevice *dev, uint8_t offset,
                  uint8_t type, uint8_t port,
                  Error **errp)
{
    /* PCIe cap v2 init */
    int pos;
    uint8_t *exp_cap;

    assert(pci_is_express(dev));

    pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset,
                             PCI_EXP_VER2_SIZEOF, errp);
    if (pos < 0) {
        return pos;
    }
    dev->exp.exp_cap = pos;
    exp_cap = dev->config + pos;

    /* Filling values common with v1 */
    pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER2);

    /* Fill link speed and width options (PCIESlot devices only) */
    pcie_cap_fill_slot_lnk(dev);

    /* Filling v2 specific values */
    pci_set_long(exp_cap + PCI_EXP_DEVCAP2,
                 PCI_EXP_DEVCAP2_EFF | PCI_EXP_DEVCAP2_EETLPP);

    /* End-End TLP Prefix Blocking is guest controllable */
    pci_set_word(dev->wmask + pos + PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_EETLPPB);

    if (dev->cap_present & QEMU_PCIE_EXTCAP_INIT) {
        /* read-only to behave like a 'NULL' Extended Capability Header */
        pci_set_long(dev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    }

    return pos;
}
255
/*
 * Add a v1 PCI Express capability at config space @offset and fill the
 * common fields.  Returns the capability offset on success, a negative
 * value on failure (the error is reported directly).
 */
int pcie_cap_v1_init(PCIDevice *dev, uint8_t offset, uint8_t type,
                     uint8_t port)
{
    /* PCIe cap v1 init */
    Error *err = NULL;
    int pos;

    assert(pci_is_express(dev));

    pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset,
                             PCI_EXP_VER1_SIZEOF, &err);
    if (pos < 0) {
        error_report_err(err);
        return pos;
    }

    dev->exp.exp_cap = pos;
    pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER1);

    return pos;
}
277
278 static int
pcie_endpoint_cap_common_init(PCIDevice * dev,uint8_t offset,uint8_t cap_size)279 pcie_endpoint_cap_common_init(PCIDevice *dev, uint8_t offset, uint8_t cap_size)
280 {
281 uint8_t type = PCI_EXP_TYPE_ENDPOINT;
282 Error *local_err = NULL;
283 int ret;
284
285 /*
286 * Windows guests will report Code 10, device cannot start, if
287 * a regular Endpoint type is exposed on a root complex. These
288 * should instead be Root Complex Integrated Endpoints.
289 */
290 if (pci_bus_is_express(pci_get_bus(dev))
291 && pci_bus_is_root(pci_get_bus(dev))) {
292 type = PCI_EXP_TYPE_RC_END;
293 }
294
295 if (cap_size == PCI_EXP_VER1_SIZEOF) {
296 return pcie_cap_v1_init(dev, offset, type, 0);
297 } else {
298 ret = pcie_cap_init(dev, offset, type, 0, &local_err);
299
300 if (ret < 0) {
301 error_report_err(local_err);
302 }
303
304 return ret;
305 }
306 }
307
/* Add a v2 Express capability for an endpoint / RC integrated endpoint */
int pcie_endpoint_cap_init(PCIDevice *dev, uint8_t offset)
{
    return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER2_SIZEOF);
}
312
/* Add a v1 Express capability for an endpoint / RC integrated endpoint */
int pcie_endpoint_cap_v1_init(PCIDevice *dev, uint8_t offset)
{
    return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER1_SIZEOF);
}
317
/* Remove the v2 Express capability added by pcie_cap_init() */
void pcie_cap_exit(PCIDevice *dev)
{
    pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER2_SIZEOF);
}
322
/* Remove the v1 Express capability added by pcie_cap_v1_init() */
void pcie_cap_v1_exit(PCIDevice *dev)
{
    pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER1_SIZEOF);
}
327
pcie_cap_get_type(const PCIDevice * dev)328 uint8_t pcie_cap_get_type(const PCIDevice *dev)
329 {
330 uint32_t pos = dev->exp.exp_cap;
331 assert(pos > 0);
332 return (pci_get_word(dev->config + pos + PCI_EXP_FLAGS) &
333 PCI_EXP_FLAGS_TYPE) >> PCI_EXP_FLAGS_TYPE_SHIFT;
334 }
335
pcie_cap_get_version(const PCIDevice * dev)336 uint8_t pcie_cap_get_version(const PCIDevice *dev)
337 {
338 uint32_t pos = dev->exp.exp_cap;
339 assert(pos > 0);
340 return pci_get_word(dev->config + pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_VERS;
341 }
342
343 /* MSI/MSI-X */
344 /* pci express interrupt message number */
345 /* 7.8.2 PCI Express Capabilities Register: Interrupt Message Number */
/*
 * Store @vector in the Interrupt Message Number field of the Express
 * capability flags (7.8.2 PCI Express Capabilities Register).
 */
void pcie_cap_flags_set_vector(PCIDevice *dev, uint8_t vector)
{
    uint8_t *flags_reg = dev->config + dev->exp.exp_cap + PCI_EXP_FLAGS;

    assert(vector < 32);
    /* Clear the old field value, then write the new one */
    pci_word_test_and_clear_mask(flags_reg, PCI_EXP_FLAGS_IRQ);
    pci_word_test_and_set_mask(flags_reg,
                               vector << PCI_EXP_FLAGS_IRQ_SHIFT);
}
354
/* Read back the Interrupt Message Number field set above */
uint8_t pcie_cap_flags_get_vector(PCIDevice *dev)
{
    uint16_t flags = pci_get_word(dev->config + dev->exp.exp_cap +
                                  PCI_EXP_FLAGS);

    return (flags & PCI_EXP_FLAGS_IRQ) >> PCI_EXP_FLAGS_IRQ_SHIFT;
}
360
/* Enable device error reporting: advertise RBER and make the error
 * enable/status bits guest-accessible. */
void pcie_cap_deverr_init(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    uint32_t ctl_bits = PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
                        PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE;
    uint32_t sta_bits = PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_NFED |
                        PCI_EXP_DEVSTA_FED | PCI_EXP_DEVSTA_URD;

    /* Advertise Role-Based Error Reporting support */
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP,
                               PCI_EXP_DEVCAP_RBER);
    /* Guest controls the four error reporting enable bits... */
    pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL, ctl_bits);
    /* ...and clears the error status bits by writing 1 to them */
    pci_long_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_DEVSTA, sta_bits);
}
373
/* Reset: clear all four error reporting enable bits in DEVCTL */
void pcie_cap_deverr_reset(PCIDevice *dev)
{
    uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;
    uint32_t enables = PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
                       PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE;

    pci_long_test_and_clear_mask(devctl, enables);
}
381
/* Make the Common Clock Configuration and Extended Synch bits of
 * LNKCTL writable by the guest. */
void pcie_cap_lnkctl_init(PCIDevice *dev)
{
    uint8_t *wmask = dev->wmask + dev->exp.exp_cap;

    pci_long_test_and_set_mask(wmask + PCI_EXP_LNKCTL,
                               PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES);
}
388
/* Reset: clear the guest-writable LNKCTL bits set by
 * pcie_cap_lnkctl_init(). */
void pcie_cap_lnkctl_reset(PCIDevice *dev)
{
    uint8_t *lnkctl = dev->config + dev->exp.exp_cap + PCI_EXP_LNKCTL;
    uint32_t bits = PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES;

    pci_long_test_and_clear_mask(lnkctl, bits);
}
395
/*
 * Recompute whether a hot-plug notification is due: hot-plug interrupts
 * must be enabled and at least one supported event must be both enabled
 * in SLTCTL and latched in SLTSTA.
 */
static void hotplug_event_update_event_status(PCIDevice *dev)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);
    bool intr_enabled = sltctl & PCI_EXP_SLTCTL_HPIE;
    bool event_pending = sltsta & sltctl & PCI_EXP_HP_EV_SUPPORTED;

    dev->exp.hpev_notified = intr_enabled && event_pending;
}
406
/*
 * Deliver a hot-plug notification to the guest when the pending state
 * computed by hotplug_event_update_event_status() changes.  Uses MSI-X
 * or MSI when enabled, otherwise drives the legacy INTx level.
 */
static void hotplug_event_notify(PCIDevice *dev)
{
    bool prev = dev->exp.hpev_notified;

    hotplug_event_update_event_status(dev);

    /* No change in pending state -> nothing to deliver */
    if (prev == dev->exp.hpev_notified) {
        return;
    }

    /* Note: the logic above does not take into account whether interrupts
     * are masked. The result is that interrupt will be sent when it is
     * subsequently unmasked. This appears to be legal: Section 6.7.3.4:
     * The Port may optionally send an MSI when there are hot-plug events that
     * occur while interrupt generation is disabled, and interrupt generation is
     * subsequently enabled. */
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_cap_flags_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_cap_flags_get_vector(dev));
    } else if (pci_intx(dev) != -1) {
        /* INTx is level triggered: follow the pending state directly */
        pci_set_irq(dev, dev->exp.hpev_notified);
    }
}
431
/*
 * De-assert the legacy INTx line once no hot-plug event is pending.
 * Nothing to do for MSI/MSI-X, which are message (edge) style
 * notifications.
 */
static void hotplug_event_clear(PCIDevice *dev)
{
    hotplug_event_update_event_status(dev);
    if (!msix_enabled(dev) && !msi_enabled(dev) && pci_intx(dev) != -1 &&
        !dev->exp.hpev_notified) {
        pci_irq_deassert(dev);
    }
}
440
/*
 * Turn the slot's power controller on (clear PCI_EXP_SLTCTL_PCC).
 * A no-op for slots that do not advertise a power controller.
 */
void pcie_cap_slot_enable_power(PCIDevice *dev)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;

    if (pci_get_long(exp_cap + PCI_EXP_SLTCAP) & PCI_EXP_SLTCAP_PCP) {
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_PCC);
    }
}
451
/* pci_for_each_device() callback: apply the power state passed via
 * @opaque (a bool *) to each device on the bus. */
static void pcie_set_power_device(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    pci_set_power(dev, *(bool *)opaque);
}
458
/*
 * Propagate the slot's power state to every device on the secondary
 * bus.  Devices are powered unless the slot advertises a power
 * controller (PCI_EXP_SLTCAP_PCP) and the guest turned it off.
 */
static void pcie_cap_update_power(PCIDevice *hotplug_dev)
{
    uint8_t *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap;
    PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(hotplug_dev));
    uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP);
    uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
    bool power = true;

    if (sltcap & PCI_EXP_SLTCAP_PCP) {
        power = (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_ON;
        /* Don't we need to check also (sltctl & PCI_EXP_SLTCTL_PIC) ? */
    }

    pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
                        pcie_set_power_device, &power);
}
475
476 /*
477 * A PCI Express Hot-Plug Event has occurred, so update slot status register
478 * and notify OS of the event if necessary.
479 *
480 * 6.7.3 PCI Express Hot-Plug Events
481 * 6.7.3.4 Software Notification of Hot-Plug Events
482 */
/* Latch @event in SLTSTA and raise a notification if anything changed */
static void pcie_cap_slot_event(PCIDevice *dev, PCIExpressHotPlugEvent event)
{
    /*
     * Minor optimization: if nothing changed - no event is needed.
     * pci_word_test_and_set_mask() returns the previous value of the
     * masked bits, so "== event" means every bit was already latched.
     */
    if (pci_word_test_and_set_mask(dev->config + dev->exp.exp_cap +
                                   PCI_EXP_SLTSTA, event) == event) {
        return;
    }
    hotplug_event_notify(dev);
}
492
/*
 * Common pre-check for both hot-plug and hot-unplug: refuse the
 * operation while the electromechanical interlock is engaged.
 */
static void pcie_cap_slot_plug_common(PCIDevice *hotplug_dev, DeviceState *dev,
                                      Error **errp)
{
    uint8_t *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap;
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    PCIE_DEV_PRINTF(PCI_DEVICE(dev), "hotplug state: 0x%x\n", sltsta);
    if (sltsta & PCI_EXP_SLTSTA_EIS) {
        /* the slot is electromechanically locked.
         * This error is propagated up to qdev and then to HMP/QMP.
         */
        error_setg_errno(errp, EBUSY, "slot is electromechanically locked");
    }
}
507
pcie_cap_slot_pre_plug_cb(HotplugHandler * hotplug_dev,DeviceState * dev,Error ** errp)508 void pcie_cap_slot_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
509 Error **errp)
510 {
511 PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
512 uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
513 uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP);
514
515 /* Check if hot-plug is disabled on the slot */
516 if (dev->hotplugged && (sltcap & PCI_EXP_SLTCAP_HPC) == 0) {
517 error_setg(errp, "Hot-plug failed: unsupported by the port device '%s'",
518 DEVICE(hotplug_pdev)->id);
519 return;
520 }
521
522 pcie_cap_slot_plug_common(PCI_DEVICE(hotplug_dev), dev, errp);
523 }
524
pcie_cap_slot_plug_cb(HotplugHandler * hotplug_dev,DeviceState * dev,Error ** errp)525 void pcie_cap_slot_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
526 Error **errp)
527 {
528 PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
529 uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
530 PCIDevice *pci_dev = PCI_DEVICE(dev);
531 uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP);
532
533 if (pci_is_vf(pci_dev)) {
534 /* Virtual function cannot be physically disconnected */
535 return;
536 }
537
538 /* Don't send event when device is enabled during qemu machine creation:
539 * it is present on boot, no hotplug event is necessary. We do send an
540 * event when the device is disabled later. */
541 if (!dev->hotplugged) {
542 pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
543 PCI_EXP_SLTSTA_PDS);
544 if (pci_dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
545 (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
546 pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
547 PCI_EXP_LNKSTA_DLLLA);
548 }
549 pcie_cap_update_power(hotplug_pdev);
550 return;
551 }
552
553 /* To enable multifunction hot-plug, we just ensure the function
554 * 0 added last. When function 0 is added, we set the sltsta and
555 * inform OS via event notification.
556 */
557 if (pci_get_function_0(pci_dev)) {
558 pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
559 PCI_EXP_SLTSTA_PDS);
560 if (pci_dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
561 (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
562 pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
563 PCI_EXP_LNKSTA_DLLLA);
564 }
565 pcie_cap_slot_event(hotplug_pdev,
566 PCI_EXP_HP_EV_PDC | PCI_EXP_HP_EV_ABP);
567 pcie_cap_update_power(hotplug_pdev);
568 }
569 }
570
/* Unplug callback: actually unrealize the device being removed */
void pcie_cap_slot_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
                             Error **errp)
{
    qdev_unrealize(dev);
}
576
/*
 * Per-device callback used when tearing down a slot: route the unplug
 * through the device's hotplug handler and unparent it.
 */
static void pcie_unplug_device(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(dev));

    if (dev->partially_hotplugged) {
        /* Partially hotplugged device: only withdraw the pending
         * deletion event, keep the device itself. */
        dev->qdev.pending_deleted_event = false;
        return;
    }
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(dev), &error_abort);
    object_unparent(OBJECT(dev));
}
588
/*
 * Detach everything behind the slot: unplug each device on the
 * secondary bus, clear Presence Detect (and link-active where the port
 * reports it), then latch Presence Detect Changed.
 */
static void pcie_cap_slot_do_unplug(PCIDevice *dev)
{
    PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP);

    pci_for_each_device_under_bus(sec_bus, pcie_unplug_device, NULL);

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                 PCI_EXP_SLTSTA_PDS);
    if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
        (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
                                     PCI_EXP_LNKSTA_DLLLA);
    }
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                               PCI_EXP_SLTSTA_PDC);
}
607
pcie_cap_slot_unplug_request_cb(HotplugHandler * hotplug_dev,DeviceState * dev,Error ** errp)608 void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev,
609 DeviceState *dev, Error **errp)
610 {
611 Error *local_err = NULL;
612 PCIDevice *pci_dev = PCI_DEVICE(dev);
613 PCIBus *bus = pci_get_bus(pci_dev);
614 PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
615 uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
616 uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP);
617 uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
618
619 /* Check if hot-unplug is disabled on the slot */
620 if ((sltcap & PCI_EXP_SLTCAP_HPC) == 0) {
621 error_setg(errp, "Hot-unplug failed: "
622 "unsupported by the port device '%s'",
623 DEVICE(hotplug_pdev)->id);
624 return;
625 }
626
627 pcie_cap_slot_plug_common(hotplug_pdev, dev, &local_err);
628 if (local_err) {
629 error_propagate(errp, local_err);
630 return;
631 }
632
633 if ((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_BLINK) {
634 error_setg(errp, "Hot-unplug failed: "
635 "guest is busy (power indicator blinking)");
636 return;
637 }
638
639 dev->pending_deleted_event = true;
640 dev->pending_deleted_expires_ms =
641 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 5000; /* 5 secs */
642
643 /* In case user cancel the operation of multi-function hot-add,
644 * remove the function that is unexposed to guest individually,
645 * without interaction with guest.
646 */
647 if (pci_dev->devfn &&
648 !bus->devices[0]) {
649 pcie_unplug_device(bus, pci_dev, NULL);
650
651 return;
652 }
653
654 if (pcie_sltctl_powered_off(sltctl)) {
655 /* slot is powered off -> unplug without round-trip to the guest */
656 pcie_cap_slot_do_unplug(hotplug_pdev);
657 hotplug_event_notify(hotplug_pdev);
658 pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
659 PCI_EXP_SLTSTA_ABP);
660 return;
661 }
662
663 pcie_cap_slot_push_attention_button(hotplug_pdev);
664 }
665
666 /* pci express slot for pci express root/downstream port
667 PCI express capability slot registers */
/*
 * Initialize the slot registers of a root/downstream port's Express
 * capability and register @dev as the hotplug handler for its secondary
 * bus.  @s supplies the physical slot number and hotplug configuration.
 */
void pcie_cap_slot_init(PCIDevice *dev, PCIESlot *s)
{
    uint32_t pos = dev->exp.exp_cap;

    /* Advertise that a slot is implemented */
    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_FLAGS,
                               PCI_EXP_FLAGS_SLOT);

    /* Reset SLTCAP except the physical slot number, then advertise the
     * electromechanical interlock, both indicators and the attention
     * button */
    pci_long_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                 ~PCI_EXP_SLTCAP_PSN);
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                               (s->slot << PCI_EXP_SLTCAP_PSN_SHIFT) |
                               PCI_EXP_SLTCAP_EIP |
                               PCI_EXP_SLTCAP_PIP |
                               PCI_EXP_SLTCAP_AIP |
                               PCI_EXP_SLTCAP_ABP);

    /*
     * Expose native hot-plug on all bridges if hot-plug is enabled on the slot.
     * (unless broken 6.1 ABI is enforced for compat reasons)
     */
    if (s->hotplug &&
        (!s->hide_native_hotplug_cap || DEVICE(dev)->hotplugged)) {
        pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                   PCI_EXP_SLTCAP_HPS |
                                   PCI_EXP_SLTCAP_HPC);
    }

    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
        /* Power controller present: powered on by default and the
         * control bit is guest-writable */
        pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                   PCI_EXP_SLTCAP_PCP);
        pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_PCC);
        pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                                   PCI_EXP_SLTCTL_PCC);
    }

    /* Both indicators start in the "off" state */
    pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
                                 PCI_EXP_SLTCTL_PIC |
                                 PCI_EXP_SLTCTL_AIC);
    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PWR_IND_OFF |
                               PCI_EXP_SLTCTL_ATTN_IND_OFF);
    /* Guest-writable controls: indicators and event interrupt enables */
    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PIC |
                               PCI_EXP_SLTCTL_AIC |
                               PCI_EXP_SLTCTL_HPIE |
                               PCI_EXP_SLTCTL_CCIE |
                               PCI_EXP_SLTCTL_PDCE |
                               PCI_EXP_SLTCTL_ABPE);
    /* Although reading PCI_EXP_SLTCTL_EIC returns always 0,
     * make the bit writable here in order to detect 1b is written.
     * pcie_cap_slot_write_config() test-and-clear the bit, so
     * this bit always returns 0 to the guest.
     */
    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_EIC);

    /* Supported hot-plug events are write-1-to-clear status bits */
    pci_word_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_SLTSTA,
                               PCI_EXP_HP_EV_SUPPORTED);

    /* Avoid migration abortion when this device hot-removed by guest */
    pci_word_test_and_clear_mask(dev->cmask + pos + PCI_EXP_SLTSTA,
                                 PCI_EXP_SLTSTA_PDS);

    dev->exp.hpev_notified = false;

    qbus_set_hotplug_handler(BUS(pci_bridge_get_sec_bus(PCI_BRIDGE(dev))),
                             OBJECT(dev));
}
737
/*
 * Reset the slot control/status registers.  When a power controller is
 * present, power and the power indicator are set from whether a device
 * currently populates the slot.
 */
void pcie_cap_slot_reset(PCIDevice *dev)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint8_t port_type = pcie_cap_get_type(dev);

    /* Only root ports and downstream ports implement slots */
    assert(port_type == PCI_EXP_TYPE_DOWNSTREAM ||
           port_type == PCI_EXP_TYPE_ROOT_PORT);

    PCIE_DEV_PRINTF(dev, "reset\n");

    /* Disable all event interrupt enables and turn both indicators off */
    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                 PCI_EXP_SLTCTL_EIC |
                                 PCI_EXP_SLTCTL_PIC |
                                 PCI_EXP_SLTCTL_AIC |
                                 PCI_EXP_SLTCTL_HPIE |
                                 PCI_EXP_SLTCTL_CCIE |
                                 PCI_EXP_SLTCTL_PDCE |
                                 PCI_EXP_SLTCTL_ABPE);
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PWR_IND_OFF |
                               PCI_EXP_SLTCTL_ATTN_IND_OFF);

    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
        /* Downstream ports enforce device number 0. */
        bool populated = pci_bridge_get_sec_bus(PCI_BRIDGE(dev))->devices[0];
        uint16_t pic;

        /* Power on iff the slot is populated */
        if (populated) {
            pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                         PCI_EXP_SLTCTL_PCC);
        } else {
            pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
                                       PCI_EXP_SLTCTL_PCC);
        }

        /* Power indicator mirrors the power controller state */
        pic = populated ?
                PCI_EXP_SLTCTL_PWR_IND_ON : PCI_EXP_SLTCTL_PWR_IND_OFF;
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic);
    }

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                 PCI_EXP_SLTSTA_EIS |/* on reset,
                                                        the lock is released */
                                 PCI_EXP_SLTSTA_CC |
                                 PCI_EXP_SLTSTA_PDC |
                                 PCI_EXP_SLTSTA_ABP);

    pcie_cap_update_power(dev);
    hotplug_event_update_event_status(dev);
}
788
/* Snapshot the current SLTCTL/SLTSTA register values into the
 * caller-provided locations. */
void pcie_cap_slot_get(PCIDevice *dev, uint16_t *slt_ctl, uint16_t *slt_sta)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;

    *slt_ctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
    *slt_sta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);
}
796
/* pci_for_each_device() callback: remember the first device seen via
 * the PCIDevice ** passed in @opaque, ignore any further devices. */
static void find_child_fn(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    PCIDevice **child = opaque;

    if (*child == NULL) {
        *child = dev;
    }
}
805
806 /*
807 * Returns the plugged device or first function of multifunction plugged device
808 */
/*
 * Returns the plugged device, or the first function of a multifunction
 * plugged device, or NULL when the slot is empty.
 */
static PCIDevice *pcie_cap_slot_find_child(PCIDevice *dev)
{
    PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
    PCIDevice *found = NULL;

    pci_for_each_device(sec_bus, pci_bus_num(sec_bus), find_child_fn, &found);

    return found;
}
818
/*
 * Post-write hook for the slot registers.  @old_slt_ctl/@old_slt_sta
 * are the register values from before the config write of @val at
 * @addr.  Handles buggy event clearing by guests, the EIC toggle,
 * unplug on power-off and command-completed notification.
 */
void pcie_cap_slot_write_config(PCIDevice *dev,
                                uint16_t old_slt_ctl, uint16_t old_slt_sta,
                                uint32_t addr, uint32_t val, int len)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) {
        /*
         * Guests tend to clear all bits during init.
         * If they clear bits that weren't set this is racy and will lose
         * events: not a big problem for manual button presses, but a
         * problem for us.  As a work-around, detect this and revert
         * status to what it was before the write.
         *
         * Note: in theory this can be detected as a duplicate button press
         * which cancels the previous press.  Does not seem to happen in
         * practice as guests seem to only have this bug during init.
         */
#define PCIE_SLOT_EVENTS (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | \
                          PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | \
                          PCI_EXP_SLTSTA_CC)

        if (val & ~old_slt_sta & PCIE_SLOT_EVENTS) {
            sltsta = (sltsta & ~PCIE_SLOT_EVENTS) | (old_slt_sta & PCIE_SLOT_EVENTS);
            pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
        }
        hotplug_event_clear(dev);
    }

    if (!ranges_overlap(addr, len, pos + PCI_EXP_SLTCTL, 2)) {
        return;
    }

    /* Writing 1b to EIC toggles the electromechanical interlock state;
     * the control bit itself always reads back as 0. */
    if (pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_EIC)) {
        sltsta ^= PCI_EXP_SLTSTA_EIS; /* toggle PCI_EXP_SLTSTA_EIS bit */
        pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
        PCIE_DEV_PRINTF(dev, "PCI_EXP_SLTCTL_EIC: "
                        "sltsta -> 0x%02"PRIx16"\n",
                        sltsta);
    }

    if (trace_event_get_state_backends(TRACE_PCIE_CAP_SLOT_WRITE_CONFIG)) {
        DeviceState *parent = DEVICE(dev);
        DeviceState *child = DEVICE(pcie_cap_slot_find_child(dev));

        trace_pcie_cap_slot_write_config(
            parent->canonical_path,
            child ? child->canonical_path : "no-child",
            (sltsta & PCI_EXP_SLTSTA_PDS) ? "present" : "not present",
            pcie_led_state_to_str(old_slt_ctl & PCI_EXP_SLTCTL_PIC),
            pcie_led_state_to_str(val & PCI_EXP_SLTCTL_PIC),
            pcie_led_state_to_str(old_slt_ctl & PCI_EXP_SLTCTL_AIC),
            pcie_led_state_to_str(val & PCI_EXP_SLTCTL_AIC),
            (old_slt_ctl & PCI_EXP_SLTCTL_PWR_OFF) ? "off" : "on",
            (val & PCI_EXP_SLTCTL_PWR_OFF) ? "off" : "on");
    }

    /*
     * If the slot is populated, power indicator is off and power
     * controller is off, it is safe to detach the devices.
     *
     * Note: don't detach if condition was already true:
     * this is a work around for guests that overwrite
     * control of powered off slots before powering them on.
     */
    if ((sltsta & PCI_EXP_SLTSTA_PDS) && pcie_sltctl_powered_off(val) &&
        !pcie_sltctl_powered_off(old_slt_ctl))
    {
        pcie_cap_slot_do_unplug(dev);
    }
    pcie_cap_update_power(dev);

    hotplug_event_notify(dev);

    /*
     * 6.7.3.2 Command Completed Events
     *
     * Software issues a command to a hot-plug capable Downstream Port by
     * issuing a write transaction that targets any portion of the Port’s Slot
     * Control register. A single write to the Slot Control register is
     * considered to be a single command, even if the write affects more than
     * one field in the Slot Control register. In response to this transaction,
     * the Port must carry out the requested actions and then set the
     * associated status field for the command completed event. */

    /* Real hardware might take a while to complete requested command because
     * physical movement would be involved like locking the electromechanical
     * lock. However in our case, command is completed instantaneously above,
     * so send a command completion event right now.
     */
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_CCI);
}
914
pcie_cap_slot_post_load(void * opaque,int version_id)915 int pcie_cap_slot_post_load(void *opaque, int version_id)
916 {
917 PCIDevice *dev = opaque;
918 hotplug_event_update_event_status(dev);
919 pcie_cap_update_power(dev);
920 return 0;
921 }
922
/*
 * Simulate a press of the slot's attention button by raising the
 * Attention Button Pressed hot-plug event on this port.
 */
void pcie_cap_slot_push_attention_button(PCIDevice *dev)
{
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_ABP);
}
927
928 /* root control/capabilities/status. PME isn't emulated for now */
void pcie_cap_root_init(PCIDevice *dev)
{
    uint8_t *wmask = dev->wmask + dev->exp.exp_cap;

    /* Let the guest toggle the three system-error enables in Root Control. */
    pci_set_word(wmask + PCI_EXP_RTCTL,
                 PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
                 PCI_EXP_RTCTL_SEFEE);
}
935
void pcie_cap_root_reset(PCIDevice *dev)
{
    uint8_t *rtctl = dev->config + dev->exp.exp_cap + PCI_EXP_RTCTL;

    /* Clear Root Control back to its all-zero reset default. */
    pci_set_word(rtctl, 0);
}
940
941 /* function level reset(FLR) */
void pcie_cap_flr_init(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;

    /* Advertise Function Level Reset support in Device Capabilities. */
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP,
                               PCI_EXP_DEVCAP_FLR);

    /*
     * BCR_FLR always reads back as 0; the bit is nevertheless made
     * guest-writable so that a 1b write can be observed.
     * pcie_cap_flr_write_config() test-and-clears it, so the guest
     * still never sees it set.
     */
    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL,
                               PCI_EXP_DEVCTL_BCR_FLR);
}
955
/*
 * Config-write hook for FLR: when the guest writes 1b to BCR_FLR in
 * Device Control, perform a device reset.
 */
void pcie_cap_flr_write_config(PCIDevice *dev,
                               uint32_t addr, uint32_t val, int len)
{
    uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;

    if (!(pci_get_word(devctl) & PCI_EXP_DEVCTL_BCR_FLR)) {
        return;
    }
    /*
     * Reset first, clear BCR_FLR second: the reset handler can thus
     * recognize an FLR by finding this bit still set.
     */
    pci_device_reset(dev);
    pci_word_test_and_clear_mask(devctl, PCI_EXP_DEVCTL_BCR_FLR);
}
967
968 /* Alternative Routing-ID Interpretation (ARI)
969 * forwarding support for root and downstream ports
970 */
void pcie_cap_arifwd_init(PCIDevice *dev)
{
    uint32_t cap = dev->exp.exp_cap;

    /* Advertise ARI forwarding in Device Capabilities 2 ... */
    pci_long_test_and_set_mask(dev->config + cap + PCI_EXP_DEVCAP2,
                               PCI_EXP_DEVCAP2_ARI);
    /* ... and make its enable bit in Device Control 2 guest-writable. */
    pci_long_test_and_set_mask(dev->wmask + cap + PCI_EXP_DEVCTL2,
                               PCI_EXP_DEVCTL2_ARI);
}
979
void pcie_cap_arifwd_reset(PCIDevice *dev)
{
    /* Reset disables ARI forwarding: clear its enable in Device Control 2. */
    pci_long_test_and_clear_mask(dev->config + dev->exp.exp_cap +
                                 PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI);
}
985
pcie_cap_is_arifwd_enabled(const PCIDevice * dev)986 bool pcie_cap_is_arifwd_enabled(const PCIDevice *dev)
987 {
988 if (!pci_is_express(dev)) {
989 return false;
990 }
991 if (!dev->exp.exp_cap) {
992 return false;
993 }
994
995 return pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) &
996 PCI_EXP_DEVCTL2_ARI;
997 }
998
999 /**************************************************************************
1000 * pci express extended capability list management functions
1001 * uint16_t ext_cap_id (16 bit)
1002 * uint8_t cap_ver (4 bit)
1003 * uint16_t cap_offset (12 bit)
1004 * uint16_t ext_cap_size
1005 */
1006
1007 /* Passing a cap_id value > 0xffff will return 0 and put end of list in prev */
/*
 * Walk the extended capability list looking for cap_id.  Returns the
 * config-space offset of the matching capability, or 0 when not found.
 * If prev_p is non-NULL it receives the offset of the preceding list
 * element: 0 when the match is the list head, or the offset of the last
 * element when cap_id is absent.
 */
static uint16_t pcie_find_capability_list(PCIDevice *dev, uint32_t cap_id,
                                          uint16_t *prev_p)
{
    uint16_t prev = 0;
    uint16_t next;
    /* The first extended capability header sits right after legacy
     * config space. */
    uint32_t header = pci_get_long(dev->config + PCI_CONFIG_SPACE_SIZE);

    if (!header) {
        /* no extended capability */
        next = 0;
        goto out;
    }
    for (next = PCI_CONFIG_SPACE_SIZE; next;
         prev = next, next = PCI_EXT_CAP_NEXT(header)) {

        /* Offsets must stay inside extended config space, with room
         * for at least a minimal (8-byte) capability. */
        assert(next >= PCI_CONFIG_SPACE_SIZE);
        assert(next <= PCIE_CONFIG_SPACE_SIZE - 8);

        header = pci_get_long(dev->config + next);
        if (PCI_EXT_CAP_ID(header) == cap_id) {
            break;
        }
    }

out:
    if (prev_p) {
        *prev_p = prev;
    }
    return next;
}
1038
/*
 * Return the config-space offset of extended capability cap_id,
 * or 0 if the device doesn't have it.
 */
uint16_t pcie_find_capability(PCIDevice *dev, uint16_t cap_id)
{
    /* Plain lookup; callers needing the predecessor use the list helper. */
    return pcie_find_capability_list(dev, cap_id, NULL);
}
1043
/*
 * Rewrite the Next Capability Offset field of the extended capability
 * header at 'pos' to point at 'next', preserving the ID/version fields.
 */
static void pcie_ext_cap_set_next(PCIDevice *dev, uint16_t pos, uint16_t next)
{
    uint32_t hdr;

    /* Extended capability offsets must be properly aligned. */
    assert(!(next & (PCI_EXT_CAP_ALIGN - 1)));

    hdr = pci_get_long(dev->config + pos);
    hdr &= ~PCI_EXT_CAP_NEXT_MASK;
    hdr |= (next << PCI_EXT_CAP_NEXT_SHIFT) & PCI_EXT_CAP_NEXT_MASK;
    pci_set_long(dev->config + pos, hdr);
}
1052
1053 /*
1054 * Caller must supply valid (offset, size) such that the range wouldn't
1055 * overlap with other capability or other registers.
1056 * This function doesn't check it.
1057 */
/*
 * Install an extended capability with header (cap_id, cap_ver) at
 * 'offset', spanning 'size' bytes, and link it onto the end of the
 * device's extended capability list (unless it is the list head itself).
 */
void pcie_add_capability(PCIDevice *dev,
                         uint16_t cap_id, uint8_t cap_ver,
                         uint16_t offset, uint16_t size)
{
    uint16_t end = offset + size;   /* uint16_t arithmetic; wrap checked */

    assert(pci_is_express(dev));
    assert(size >= 8);
    assert(offset >= PCI_CONFIG_SPACE_SIZE);
    assert(offset < end);                     /* catches 16-bit wrap */
    assert(end <= PCIE_CONFIG_SPACE_SIZE);

    if (offset != PCI_CONFIG_SPACE_SIZE) {
        uint16_t tail;

        /*
         * 0xffffffff can never match a 16-bit capability ID, so the
         * search falls off the end of the list and leaves the final
         * element's offset in 'tail'.
         */
        pcie_find_capability_list(dev, 0xffffffff, &tail);
        assert(tail >= PCI_CONFIG_SPACE_SIZE);
        pcie_ext_cap_set_next(dev, tail, offset);
    }
    pci_set_long(dev->config + offset, PCI_EXT_CAP(cap_id, cap_ver, 0));

    /* Make capability read-only by default */
    memset(dev->wmask + offset, 0, size);
    memset(dev->w1cmask + offset, 0, size);
    /* Check capability by default */
    memset(dev->cmask + offset, 0xFF, size);
}
1087
1088 /*
1089 * Sync the PCIe Link Status negotiated speed and width of a bridge with the
1090 * downstream device. If downstream device is not present, re-write with the
1091 * Link Capability fields. If downstream device reports invalid width or
1092 * speed, replace with minimum values (LnkSta fields are RsvdZ on VFs but such
1093 * values interfere with PCIe native hotplug detecting new devices). Limit
1094 * width and speed to bridge capabilities for compatibility. Use config_read
1095 * to access the downstream device since it could be an assigned device with
1096 * volatile link information.
1097 */
void pcie_sync_bridge_lnk(PCIDevice *bridge_dev)
{
    PCIBridge *br = PCI_BRIDGE(bridge_dev);
    PCIBus *bus = pci_bridge_get_sec_bus(br);
    PCIDevice *target = bus->devices[0];  /* device at devfn 0 below us */
    uint8_t *exp_cap = bridge_dev->config + bridge_dev->exp.exp_cap;
    uint16_t lnksta, lnkcap = pci_get_word(exp_cap + PCI_EXP_LNKCAP);

    if (!target || !target->exp.exp_cap) {
        /* No express device below: mirror our own Link Capabilities. */
        lnksta = lnkcap;
    } else {
        /*
         * Go through config_read so an assigned device with volatile
         * link state reports its current negotiated values.
         */
        lnksta = target->config_read(target,
                                     target->exp.exp_cap + PCI_EXP_LNKSTA,
                                     sizeof(lnksta));

        /* Clamp negotiated width to the bridge's maximum ... */
        if ((lnksta & PCI_EXP_LNKSTA_NLW) > (lnkcap & PCI_EXP_LNKCAP_MLW)) {
            lnksta &= ~PCI_EXP_LNKSTA_NLW;
            lnksta |= lnkcap & PCI_EXP_LNKCAP_MLW;
        } else if (!(lnksta & PCI_EXP_LNKSTA_NLW)) {
            /* ... and replace an invalid zero width (LnkSta is RsvdZ on
             * VFs) with the x1 minimum. */
            lnksta |= QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1);
        }

        /* Same treatment for the negotiated link speed. */
        if ((lnksta & PCI_EXP_LNKSTA_CLS) > (lnkcap & PCI_EXP_LNKCAP_SLS)) {
            lnksta &= ~PCI_EXP_LNKSTA_CLS;
            lnksta |= lnkcap & PCI_EXP_LNKCAP_SLS;
        } else if (!(lnksta & PCI_EXP_LNKSTA_CLS)) {
            lnksta |= QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT);
        }
    }

    /* Rewrite only the speed and width fields of the bridge's LnkSta. */
    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
                                 PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, lnksta &
                               (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW));
}
1133
1134 /**************************************************************************
1135 * pci express extended capability helper functions
1136 */
1137
1138 /* ARI */
/* ARI */
void pcie_ari_init(PCIDevice *dev, uint16_t offset)
{
    /* Next Function Number: 1 if requested by the device model, else 0. */
    uint16_t nextfn = (dev->cap_present & QEMU_PCIE_ARI_NEXTFN_1) ? 1 : 0;

    pcie_add_capability(dev, PCI_EXT_CAP_ID_ARI, PCI_ARI_VER,
                        offset, PCI_ARI_SIZEOF);
    /* The Next Function Number field occupies bits 15:8 of the ARI
     * Capability register. */
    pci_set_long(dev->config + offset + PCI_ARI_CAP, (nextfn & 0xff) << 8);
}
1147
/*
 * Add a Device Serial Number extended capability carrying ser_num.
 */
void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num)
{
    static const int dsn_ver = 1;
    static const int dsn_cap = 4;   /* serial number follows the header */

    pcie_add_capability(dev, PCI_EXT_CAP_ID_DSN, dsn_ver, offset,
                        PCI_EXT_CAP_DSN_SIZEOF);
    pci_set_quad(dev->config + offset + dsn_cap, ser_num);
}
1157
/*
 * Add an ATS (Address Translation Services) extended capability.
 * 'aligned' advertises the Page Aligned Request bit in the ATS
 * Capability register.
 */
void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned)
{
    pcie_add_capability(dev, PCI_EXT_CAP_ID_ATS, 0x1,
                        offset, PCI_EXT_CAP_ATS_SIZEOF);
    dev->exp.ats_cap = offset;

    /* Invalidate Queue Depth 0 */
    if (aligned) {
        pci_set_word(dev->config + offset + PCI_ATS_CAP,
                     PCI_ATS_CAP_PAGE_ALIGNED);
    }
    /* STU 0, Disabled by default */
    pci_set_word(dev->config + offset + PCI_ATS_CTRL, 0);

    /*
     * Guest-writable bits: Enable (bit 15) plus the low STU bits.
     * NOTE(review): 0x800f covers only 4 STU bits while the spec's STU
     * field is 5 bits wide — confirm whether this is intentional.
     */
    pci_set_word(dev->wmask + dev->exp.ats_cap + PCI_ATS_CTRL, 0x800f);
}
1175
1176 /* ACS (Access Control Services) */
/* ACS (Access Control Services) */
void pcie_acs_init(PCIDevice *dev, uint16_t offset)
{
    bool is_downstream = pci_is_express_downstream_port(dev);
    uint16_t cap_bits;

    /* For endpoints, only multifunction devs may have an ACS capability: */
    assert(is_downstream ||
           (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) ||
           PCI_FUNC(dev->devfn));

    pcie_add_capability(dev, PCI_EXT_CAP_ID_ACS, PCI_ACS_VER, offset,
                        PCI_ACS_SIZEOF);
    dev->exp.acs_cap = offset;

    /*
     * Downstream ports must implement SV, TB, RR, CR, UF, and DT (with
     * caveats on the latter four that we ignore for simplicity).
     * Endpoints may also implement a subset of ACS capabilities,
     * but these are optional if the endpoint does not support
     * peer-to-peer between functions, so none are advertised here.
     */
    cap_bits = is_downstream ?
        (PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
         PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT) : 0;

    pci_set_word(dev->config + offset + PCI_ACS_CAP, cap_bits);
    /* Each implemented capability bit gets a matching writable ctrl bit. */
    pci_set_word(dev->wmask + offset + PCI_ACS_CTRL, cap_bits);
}
1206
void pcie_acs_reset(PCIDevice *dev)
{
    /* Nothing to do for devices without an ACS capability. */
    if (!dev->exp.acs_cap) {
        return;
    }
    /* All ACS controls default to disabled after reset. */
    pci_set_word(dev->config + dev->exp.acs_cap + PCI_ACS_CTRL, 0);
}
1213