/*
 * pcie.c
 *
 * Copyright (c) 2010 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pcie.h"
#include "hw/pci/msix.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pcie_regs.h"
#include "qemu/range.h"

//#define DEBUG_PCIE
#ifdef DEBUG_PCIE
# define PCIE_DPRINTF(fmt, ...)                                         \
    fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__)
#else
# define PCIE_DPRINTF(fmt, ...) do {} while (0)
#endif
#define PCIE_DEV_PRINTF(dev, fmt, ...)                                  \
    PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)


/***************************************************************************
 * pci express capability helper functions
 */

static void
pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint8_t *cmask = dev->cmask + dev->exp.exp_cap;

    /* capability register
       interrupt message number defaults to 0 */
    pci_set_word(exp_cap + PCI_EXP_FLAGS,
                 ((type << PCI_EXP_FLAGS_TYPE_SHIFT) & PCI_EXP_FLAGS_TYPE) |
                 version);

    /* device capability register
     * table 7-12:
     * the role-based error reporting bit must be set by all
     * Functions conforming to the ECN, PCI Express Base
     * Specification, Revision 1.1, or subsequent PCI Express Base
     * Specification revisions.
     */
    pci_set_long(exp_cap + PCI_EXP_DEVCAP, PCI_EXP_DEVCAP_RBER);

    pci_set_long(exp_cap + PCI_EXP_LNKCAP,
                 (port << PCI_EXP_LNKCAP_PN_SHIFT) |
                 PCI_EXP_LNKCAP_ASPMS_0S |
                 PCI_EXP_LNK_MLW_1 |
                 PCI_EXP_LNK_LS_25);

    pci_set_word(exp_cap + PCI_EXP_LNKSTA,
                 PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25);

    if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA) {
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
                                   PCI_EXP_LNKSTA_DLLLA);
    }

    /* We changed link status bits over time, and changing them across
     * migrations is generally fine as hardware changes them too.
     * Let's not bother checking.
     */
    pci_set_word(cmask + PCI_EXP_LNKSTA, 0);
}

int pcie_cap_init(PCIDevice *dev, uint8_t offset, uint8_t type, uint8_t port)
{
    /* PCIe cap v2 init */
    int pos;
    uint8_t *exp_cap;

    assert(pci_is_express(dev));

    pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset, PCI_EXP_VER2_SIZEOF);
    if (pos < 0) {
        return pos;
    }
    dev->exp.exp_cap = pos;
    exp_cap = dev->config + pos;

    /* Fill values common with v1 */
    pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER2);

    /* Fill v2 specific values */
    pci_set_long(exp_cap + PCI_EXP_DEVCAP2,
                 PCI_EXP_DEVCAP2_EFF | PCI_EXP_DEVCAP2_EETLPP);

    pci_set_word(dev->wmask + pos + PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_EETLPPB);

    if (dev->cap_present & QEMU_PCIE_EXTCAP_INIT) {
        /* read-only to behave like a 'NULL' Extended Capability Header */
        pci_set_long(dev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    }

    return pos;
}

int pcie_cap_v1_init(PCIDevice *dev, uint8_t offset, uint8_t type,
                     uint8_t port)
{
    /* PCIe cap v1 init */
    int pos;

    assert(pci_is_express(dev));

    pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset, PCI_EXP_VER1_SIZEOF);
    if (pos < 0) {
        return pos;
    }
    dev->exp.exp_cap = pos;

    pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER1);

    return pos;
}

static int
pcie_endpoint_cap_common_init(PCIDevice *dev, uint8_t offset, uint8_t cap_size)
{
    uint8_t type = PCI_EXP_TYPE_ENDPOINT;

    /*
     * Windows guests will report Code 10, device cannot start, if
     * a regular Endpoint type is exposed on a root complex.  These
     * should instead be Root Complex Integrated Endpoints.
     */
    if (pci_bus_is_express(dev->bus) && pci_bus_is_root(dev->bus)) {
        type = PCI_EXP_TYPE_RC_END;
    }

    return (cap_size == PCI_EXP_VER1_SIZEOF)
        ? pcie_cap_v1_init(dev, offset, type, 0)
        : pcie_cap_init(dev, offset, type, 0);
}

int pcie_endpoint_cap_init(PCIDevice *dev, uint8_t offset)
{
    return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER2_SIZEOF);
}

int pcie_endpoint_cap_v1_init(PCIDevice *dev, uint8_t offset)
{
    return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER1_SIZEOF);
}
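
/*
 * Illustrative usage (hypothetical endpoint model, not code from this file):
 * an Express endpoint would typically initialize the capability from its
 * realize/init hook with something like
 *
 *     int pos = pcie_endpoint_cap_init(pci_dev, 0);
 *     if (pos < 0) {
 *         return pos;
 *     }
 *
 * Passing an offset of 0 lets pci_add_capability() pick a free location in
 * the conventional configuration space for the capability.
 */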

void pcie_cap_exit(PCIDevice *dev)
{
    pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER2_SIZEOF);
}

void pcie_cap_v1_exit(PCIDevice *dev)
{
    pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER1_SIZEOF);
}

uint8_t pcie_cap_get_type(const PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    assert(pos > 0);
    return (pci_get_word(dev->config + pos + PCI_EXP_FLAGS) &
            PCI_EXP_FLAGS_TYPE) >> PCI_EXP_FLAGS_TYPE_SHIFT;
}

/* MSI/MSI-X */
/* pci express interrupt message number */
/* 7.8.2 PCI Express Capabilities Register: Interrupt Message Number */
void pcie_cap_flags_set_vector(PCIDevice *dev, uint8_t vector)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    assert(vector < 32);
    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_FLAGS, PCI_EXP_FLAGS_IRQ);
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_FLAGS,
                               vector << PCI_EXP_FLAGS_IRQ_SHIFT);
}

uint8_t pcie_cap_flags_get_vector(PCIDevice *dev)
{
    return (pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_FLAGS) &
            PCI_EXP_FLAGS_IRQ) >> PCI_EXP_FLAGS_IRQ_SHIFT;
}

void pcie_cap_deverr_init(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP,
                               PCI_EXP_DEVCAP_RBER);
    pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL,
                               PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
                               PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
    pci_long_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_DEVSTA,
                               PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_NFED |
                               PCI_EXP_DEVSTA_FED | PCI_EXP_DEVSTA_URD);
}

void pcie_cap_deverr_reset(PCIDevice *dev)
{
    uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;
    pci_long_test_and_clear_mask(devctl,
                                 PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
                                 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
}

void pcie_cap_lnkctl_init(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_LNKCTL,
                               PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES);
}

void pcie_cap_lnkctl_reset(PCIDevice *dev)
{
    uint8_t *lnkctl = dev->config + dev->exp.exp_cap + PCI_EXP_LNKCTL;
    pci_long_test_and_clear_mask(lnkctl,
                                 PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES);
}

static void hotplug_event_update_event_status(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    dev->exp.hpev_notified = (sltctl & PCI_EXP_SLTCTL_HPIE) &&
        (sltsta & sltctl & PCI_EXP_HP_EV_SUPPORTED);
}

static void hotplug_event_notify(PCIDevice *dev)
{
    bool prev = dev->exp.hpev_notified;

    hotplug_event_update_event_status(dev);

    if (prev == dev->exp.hpev_notified) {
        return;
    }

    /* Note: the logic above does not take into account whether interrupts
     * are masked. The result is that the interrupt will be sent when it is
     * subsequently unmasked. This appears to be legal: Section 6.7.3.4:
     * The Port may optionally send an MSI when there are hot-plug events that
     * occur while interrupt generation is disabled, and interrupt generation
     * is subsequently enabled. */
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_cap_flags_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_cap_flags_get_vector(dev));
    } else {
        pci_set_irq(dev, dev->exp.hpev_notified);
    }
}

static void hotplug_event_clear(PCIDevice *dev)
{
    hotplug_event_update_event_status(dev);
    if (!msix_enabled(dev) && !msi_enabled(dev) && !dev->exp.hpev_notified) {
        pci_irq_deassert(dev);
    }
}

/*
 * A PCI Express Hot-Plug Event has occurred, so update slot status register
 * and notify OS of the event if necessary.
 *
 * 6.7.3 PCI Express Hot-Plug Events
 * 6.7.3.4 Software Notification of Hot-Plug Events
 */
static void pcie_cap_slot_event(PCIDevice *dev, PCIExpressHotPlugEvent event)
{
    /* Minor optimization: if nothing changed - no event is needed. */
    if (pci_word_test_and_set_mask(dev->config + dev->exp.exp_cap +
                                   PCI_EXP_SLTSTA, event)) {
        return;
    }
    hotplug_event_notify(dev);
}

static void pcie_cap_slot_hotplug_common(PCIDevice *hotplug_dev,
                                         DeviceState *dev,
                                         uint8_t **exp_cap, Error **errp)
{
    *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap;
    uint16_t sltsta = pci_get_word(*exp_cap + PCI_EXP_SLTSTA);

    PCIE_DEV_PRINTF(PCI_DEVICE(dev), "hotplug state: 0x%x\n", sltsta);
    if (sltsta & PCI_EXP_SLTSTA_EIS) {
        /* The slot is electromechanically locked.
         * This error is propagated up to qdev and then to HMP/QMP.
         */
        error_setg_errno(errp, EBUSY, "slot is electromechanically locked");
    }
}

void pcie_cap_slot_hotplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    uint8_t *exp_cap;
    PCIDevice *pci_dev = PCI_DEVICE(dev);

    pcie_cap_slot_hotplug_common(PCI_DEVICE(hotplug_dev), dev, &exp_cap, errp);

    /* Don't send an event when the device is enabled during qemu machine
     * creation: it is present on boot, so no hotplug event is necessary.
     * We do send an event when the device is disabled later. */
    if (!dev->hotplugged) {
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDS);
        return;
    }

    /* To enable multifunction hot-plug, we just ensure that function 0
     * is added last. When function 0 is added, we set the sltsta and
     * inform the OS via an event notification.
     */
    if (pci_get_function_0(pci_dev)) {
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDS);
        pcie_cap_slot_event(PCI_DEVICE(hotplug_dev),
                            PCI_EXP_HP_EV_PDC | PCI_EXP_HP_EV_ABP);
    }
}

static void pcie_unplug_device(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    object_unparent(OBJECT(dev));
}
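
/*
 * Hot-unplug request flow: the handler below only presses the attention
 * button. The guest is expected to respond by writing Slot Control with the
 * Power Controller Control bit set (power off) and the Power Indicator set
 * to off; pcie_cap_slot_write_config() then actually detaches the devices
 * behind the port.
 */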

void pcie_cap_slot_hot_unplug_request_cb(HotplugHandler *hotplug_dev,
                                         DeviceState *dev, Error **errp)
{
    uint8_t *exp_cap;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIBus *bus = pci_dev->bus;

    pcie_cap_slot_hotplug_common(PCI_DEVICE(hotplug_dev), dev, &exp_cap, errp);

    /* In case the user cancels a multi-function hot-add operation, remove
     * the functions that were never exposed to the guest individually,
     * without interacting with the guest.
     */
    if (pci_dev->devfn &&
        !bus->devices[0]) {
        pcie_unplug_device(bus, pci_dev, NULL);

        return;
    }

    pcie_cap_slot_push_attention_button(PCI_DEVICE(hotplug_dev));
}

/* pci express slot for pci express root/downstream port
   PCI express capability slot registers */
void pcie_cap_slot_init(PCIDevice *dev, uint16_t slot)
{
    uint32_t pos = dev->exp.exp_cap;

    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_FLAGS,
                               PCI_EXP_FLAGS_SLOT);

    pci_long_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                 ~PCI_EXP_SLTCAP_PSN);
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                               (slot << PCI_EXP_SLTCAP_PSN_SHIFT) |
                               PCI_EXP_SLTCAP_EIP |
                               PCI_EXP_SLTCAP_HPS |
                               PCI_EXP_SLTCAP_HPC |
                               PCI_EXP_SLTCAP_PIP |
                               PCI_EXP_SLTCAP_AIP |
                               PCI_EXP_SLTCAP_ABP);

    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
        pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                   PCI_EXP_SLTCAP_PCP);
        pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_PCC);
        pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                                   PCI_EXP_SLTCTL_PCC);
    }

    pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
                                 PCI_EXP_SLTCTL_PIC |
                                 PCI_EXP_SLTCTL_AIC);
    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PIC_OFF |
                               PCI_EXP_SLTCTL_AIC_OFF);
    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PIC |
                               PCI_EXP_SLTCTL_AIC |
                               PCI_EXP_SLTCTL_HPIE |
                               PCI_EXP_SLTCTL_CCIE |
                               PCI_EXP_SLTCTL_PDCE |
                               PCI_EXP_SLTCTL_ABPE);
    /* Although reading PCI_EXP_SLTCTL_EIC always returns 0,
     * make the bit writable here in order to detect when 1b is written.
     * pcie_cap_slot_write_config() tests and clears the bit, so
     * this bit always returns 0 to the guest.
     */
    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_EIC);

    pci_word_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_SLTSTA,
                               PCI_EXP_HP_EV_SUPPORTED);

    dev->exp.hpev_notified = false;

    qbus_set_hotplug_handler(BUS(pci_bridge_get_sec_bus(PCI_BRIDGE(dev))),
                             DEVICE(dev), NULL);
}
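
/*
 * Illustrative call order for a hot-plug capable root/downstream port (the
 * exact sequence lives in the individual port models, not in this file):
 * the port's realize hook typically calls pcie_cap_init() followed by
 * pcie_cap_slot_init(d, slot), and its reset handler calls
 * pcie_cap_slot_reset() so the slot registers return to their defaults.
 */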

void pcie_cap_slot_reset(PCIDevice *dev)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint8_t port_type = pcie_cap_get_type(dev);

    assert(port_type == PCI_EXP_TYPE_DOWNSTREAM ||
           port_type == PCI_EXP_TYPE_ROOT_PORT);

    PCIE_DEV_PRINTF(dev, "reset\n");

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                 PCI_EXP_SLTCTL_EIC |
                                 PCI_EXP_SLTCTL_PIC |
                                 PCI_EXP_SLTCTL_AIC |
                                 PCI_EXP_SLTCTL_HPIE |
                                 PCI_EXP_SLTCTL_CCIE |
                                 PCI_EXP_SLTCTL_PDCE |
                                 PCI_EXP_SLTCTL_ABPE);
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_AIC_OFF);

    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
        /* Downstream ports enforce device number 0. */
        bool populated = pci_bridge_get_sec_bus(PCI_BRIDGE(dev))->devices[0];
        uint16_t pic;

        if (populated) {
            pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                         PCI_EXP_SLTCTL_PCC);
        } else {
            pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
                                       PCI_EXP_SLTCTL_PCC);
        }

        pic = populated ? PCI_EXP_SLTCTL_PIC_ON : PCI_EXP_SLTCTL_PIC_OFF;
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic);
    }

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                 PCI_EXP_SLTSTA_EIS |/* on reset,
                                                        the lock is released */
                                 PCI_EXP_SLTSTA_CC |
                                 PCI_EXP_SLTSTA_PDC |
                                 PCI_EXP_SLTSTA_ABP);

    hotplug_event_update_event_status(dev);
}

void pcie_cap_slot_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) {
        hotplug_event_clear(dev);
    }

    if (!ranges_overlap(addr, len, pos + PCI_EXP_SLTCTL, 2)) {
        return;
    }

    if (pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_EIC)) {
        sltsta ^= PCI_EXP_SLTSTA_EIS; /* toggle PCI_EXP_SLTSTA_EIS bit */
        pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
        PCIE_DEV_PRINTF(dev, "PCI_EXP_SLTCTL_EIC: "
                        "sltsta -> 0x%02"PRIx16"\n",
                        sltsta);
    }

    /*
     * If the slot is populated, the power indicator is off and the power
     * controller is off, it is safe to detach the devices.
     */
    if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) &&
        ((val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF)) {
        PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
        pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
                            pcie_unplug_device, NULL);

        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                     PCI_EXP_SLTSTA_PDS);
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDC);
    }

    hotplug_event_notify(dev);

    /*
     * 6.7.3.2 Command Completed Events
     *
     * Software issues a command to a hot-plug capable Downstream Port by
     * issuing a write transaction that targets any portion of the Port's Slot
     * Control register. A single write to the Slot Control register is
     * considered to be a single command, even if the write affects more than
     * one field in the Slot Control register. In response to this transaction,
     * the Port must carry out the requested actions and then set the
     * associated status field for the command completed event. */

    /* Real hardware might take a while to complete a requested command
     * because physical movement would be involved, such as locking the
     * electromechanical lock. However, in our case the command is completed
     * instantaneously above, so send a command completion event right now.
     */
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_CCI);
}

int pcie_cap_slot_post_load(void *opaque, int version_id)
{
    PCIDevice *dev = opaque;
    hotplug_event_update_event_status(dev);
    return 0;
}

void pcie_cap_slot_push_attention_button(PCIDevice *dev)
{
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_ABP);
}

/* root control/capabilities/status. PME isn't emulated for now */
void pcie_cap_root_init(PCIDevice *dev)
{
    pci_set_word(dev->wmask + dev->exp.exp_cap + PCI_EXP_RTCTL,
                 PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
                 PCI_EXP_RTCTL_SEFEE);
}

void pcie_cap_root_reset(PCIDevice *dev)
{
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_RTCTL, 0);
}

/* function level reset (FLR) */
void pcie_cap_flr_init(PCIDevice *dev)
{
    pci_long_test_and_set_mask(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCAP,
                               PCI_EXP_DEVCAP_FLR);

    /* Although reading BCR_FLR always returns 0,
     * the bit is made writable here in order to detect when 1b is written.
     * pcie_cap_flr_write_config() tests and clears the bit, so
     * this bit always returns 0 to the guest.
     */
    pci_word_test_and_set_mask(dev->wmask + dev->exp.exp_cap + PCI_EXP_DEVCTL,
                               PCI_EXP_DEVCTL_BCR_FLR);
}

void pcie_cap_flr_write_config(PCIDevice *dev,
                               uint32_t addr, uint32_t val, int len)
{
    uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;
    if (pci_get_word(devctl) & PCI_EXP_DEVCTL_BCR_FLR) {
        /* Clear PCI_EXP_DEVCTL_BCR_FLR after invoking the reset handler
           so the handler can detect FLR by looking at this bit. */
        pci_device_reset(dev);
        pci_word_test_and_clear_mask(devctl, PCI_EXP_DEVCTL_BCR_FLR);
    }
}

/* Alternative Routing-ID Interpretation (ARI)
 * forwarding support for root and downstream ports
 */
void pcie_cap_arifwd_init(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP2,
                               PCI_EXP_DEVCAP2_ARI);
    pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL2,
                               PCI_EXP_DEVCTL2_ARI);
}

void pcie_cap_arifwd_reset(PCIDevice *dev)
{
    uint8_t *devctl2 = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2;
    pci_long_test_and_clear_mask(devctl2, PCI_EXP_DEVCTL2_ARI);
}

bool pcie_cap_is_arifwd_enabled(const PCIDevice *dev)
{
    if (!pci_is_express(dev)) {
        return false;
    }
    if (!dev->exp.exp_cap) {
        return false;
    }

    return pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) &
        PCI_EXP_DEVCTL2_ARI;
}

/**************************************************************************
 * pci express extended capability list management functions
 * uint16_t ext_cap_id (16 bit)
 * uint8_t cap_ver (4 bit)
 * uint16_t cap_offset (12 bit)
 * uint16_t ext_cap_size
 */

/* Passing a cap_id value > 0xffff will return 0 and put end of list in prev */
static uint16_t pcie_find_capability_list(PCIDevice *dev, uint32_t cap_id,
                                          uint16_t *prev_p)
{
    uint16_t prev = 0;
    uint16_t next;
    uint32_t header = pci_get_long(dev->config + PCI_CONFIG_SPACE_SIZE);

    if (!header) {
        /* no extended capability */
        next = 0;
        goto out;
    }
    for (next = PCI_CONFIG_SPACE_SIZE; next;
         prev = next, next = PCI_EXT_CAP_NEXT(header)) {

        assert(next >= PCI_CONFIG_SPACE_SIZE);
        assert(next <= PCIE_CONFIG_SPACE_SIZE - 8);

        header = pci_get_long(dev->config + next);
        if (PCI_EXT_CAP_ID(header) == cap_id) {
            break;
        }
    }

out:
    if (prev_p) {
        *prev_p = prev;
    }
    return next;
}

uint16_t pcie_find_capability(PCIDevice *dev, uint16_t cap_id)
{
    return pcie_find_capability_list(dev, cap_id, NULL);
}
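
/*
 * Extended capability header layout, for reference (PCI Express spec):
 * bits 15:0 hold the capability ID, bits 19:16 the capability version, and
 * bits 31:20 the offset of the next capability (0 terminates the list).
 * The helper below rewrites only the "next" field of an existing header.
 */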

static void pcie_ext_cap_set_next(PCIDevice *dev, uint16_t pos, uint16_t next)
{
    uint32_t header = pci_get_long(dev->config + pos);
    assert(!(next & (PCI_EXT_CAP_ALIGN - 1)));
    header = (header & ~PCI_EXT_CAP_NEXT_MASK) |
        ((next << PCI_EXT_CAP_NEXT_SHIFT) & PCI_EXT_CAP_NEXT_MASK);
    pci_set_long(dev->config + pos, header);
}

/*
 * Caller must supply a valid (offset, size) such that the range doesn't
 * overlap with any other capability or other registers.
 * This function doesn't check that.
 */
void pcie_add_capability(PCIDevice *dev,
                         uint16_t cap_id, uint8_t cap_ver,
                         uint16_t offset, uint16_t size)
{
    assert(offset >= PCI_CONFIG_SPACE_SIZE);
    assert(offset < offset + size);
    assert(offset + size <= PCIE_CONFIG_SPACE_SIZE);
    assert(size >= 8);
    assert(pci_is_express(dev));

    if (offset != PCI_CONFIG_SPACE_SIZE) {
        uint16_t prev;

        /*
         * 0xffffffff is not a valid cap id (it's a 16 bit field). It is used
         * internally to find the last capability in the linked list.
         */
        pcie_find_capability_list(dev, 0xffffffff, &prev);
        assert(prev >= PCI_CONFIG_SPACE_SIZE);
        pcie_ext_cap_set_next(dev, prev, offset);
    }
    pci_set_long(dev->config + offset, PCI_EXT_CAP(cap_id, cap_ver, 0));

    /* Make capability read-only by default */
    memset(dev->wmask + offset, 0, size);
    memset(dev->w1cmask + offset, 0, size);
    /* Check capability by default */
    memset(dev->cmask + offset, 0xFF, size);
}

/**************************************************************************
 * pci express extended capability helper functions
 */

/* ARI */
void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn)
{
    pcie_add_capability(dev, PCI_EXT_CAP_ID_ARI, PCI_ARI_VER,
                        offset, PCI_ARI_SIZEOF);
    pci_set_long(dev->config + offset + PCI_ARI_CAP, (nextfn & 0xff) << 8);
}

void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num)
{
    static const int pci_dsn_ver = 1;
    static const int pci_dsn_cap = 4;

    pcie_add_capability(dev, PCI_EXT_CAP_ID_DSN, pci_dsn_ver, offset,
                        PCI_EXT_CAP_DSN_SIZEOF);
    pci_set_quad(dev->config + offset + pci_dsn_cap, ser_num);
}

void pcie_ats_init(PCIDevice *dev, uint16_t offset)
{
    pcie_add_capability(dev, PCI_EXT_CAP_ID_ATS, 0x1,
                        offset, PCI_EXT_CAP_ATS_SIZEOF);

    dev->exp.ats_cap = offset;

    /* Invalidate Queue Depth 0, Page Aligned Request 0 */
    pci_set_word(dev->config + offset + PCI_ATS_CAP, 0);
    /* STU 0, Disabled by default */
    pci_set_word(dev->config + offset + PCI_ATS_CTRL, 0);

    pci_set_word(dev->wmask + dev->exp.ats_cap + PCI_ATS_CTRL, 0x800f);
}
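
/*
 * Illustrative layout (hypothetical device model, not code from this file):
 * a device placing several extended capabilities typically picks fixed,
 * non-overlapping offsets starting at 0x100, e.g.
 *
 *     pcie_ari_init(dev, 0x100, 1);
 *     pcie_dev_ser_num_init(dev, 0x100 + PCI_ARI_SIZEOF, ser_num);
 *
 * pcie_add_capability() links each new header onto the end of the list but,
 * as noted above, does not check the ranges for overlap.
 */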