#ifndef QEMU_PCI_DEVICE_H
#define QEMU_PCI_DEVICE_H

#include "hw/pci/pci.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_doe.h"

#define TYPE_PCI_DEVICE "pci-device"
typedef struct PCIDeviceClass PCIDeviceClass;
DECLARE_OBJ_CHECKERS(PCIDevice, PCIDeviceClass,
                     PCI_DEVICE, TYPE_PCI_DEVICE)

/*
 * Implemented by devices that can be plugged on CXL buses. In the spec, this
 * is actually a "CXL Component", but we name it device to match the PCI
 * naming.
 */
#define INTERFACE_CXL_DEVICE "cxl-device"

/* Implemented by devices that can be plugged on PCI Express buses */
#define INTERFACE_PCIE_DEVICE "pci-express-device"

/* Implemented by devices that can be plugged on Conventional PCI buses */
#define INTERFACE_CONVENTIONAL_PCI_DEVICE "conventional-pci-device"

struct PCIDeviceClass {
    DeviceClass parent_class;

    void (*realize)(PCIDevice *dev, Error **errp);
    PCIUnregisterFunc *exit;
    PCIConfigReadFunc *config_read;
    PCIConfigWriteFunc *config_write;

    uint16_t vendor_id;
    uint16_t device_id;
    uint8_t revision;
    uint16_t class_id;
    uint16_t subsystem_vendor_id;       /* only for header type = 0 */
    uint16_t subsystem_id;              /* only for header type = 0 */

    const char *romfile;                /* rom bar */

    bool sriov_vf_user_creatable;
};
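/*
 * Illustrative sketch only (not part of this header): a device model
 * typically fills in PCIDeviceClass from its QOM class_init hook. The
 * "foodev" names and the vendor/device IDs below are hypothetical
 * placeholders, not real identifiers.
 *
 *     static void foodev_class_init(ObjectClass *klass, void *data)
 *     {
 *         DeviceClass *dc = DEVICE_CLASS(klass);
 *         PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
 *
 *         k->realize   = foodev_realize;   // maps BARs, adds capabilities
 *         k->exit      = foodev_exit;
 *         k->vendor_id = 0x1234;           // hypothetical IDs
 *         k->device_id = 0x5678;
 *         k->revision  = 1;
 *         k->class_id  = PCI_CLASS_OTHERS;
 *         dc->desc     = "Example PCI device (illustration only)";
 *     }
 */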
enum PCIReqIDType {
    PCI_REQ_ID_INVALID = 0,
    PCI_REQ_ID_BDF,
    PCI_REQ_ID_SECONDARY_BUS,
    PCI_REQ_ID_MAX,
};
typedef enum PCIReqIDType PCIReqIDType;

struct PCIReqIDCache {
    PCIDevice *dev;
    PCIReqIDType type;
};
typedef struct PCIReqIDCache PCIReqIDCache;

struct PCIDevice {
    DeviceState qdev;
    bool partially_hotplugged;
    bool enabled;

    /* PCI config space */
    uint8_t *config;

    /*
     * Used to enable config checks on load. Note that writable bits are
     * never checked even if set in cmask.
     */
    uint8_t *cmask;

    /* Used to implement R/W bytes */
    uint8_t *wmask;

    /* Used to implement RW1C (Write 1 to Clear) bytes */
    uint8_t *w1cmask;

    /* Used to allocate config space for capabilities. */
    uint8_t *used;

    /* the following fields are read only */
    int32_t devfn;
    /*
     * Cached device used to fetch the requester ID, to avoid walking the
     * PCI tree every time we invoke a PCI request (e.g., MSI). For a
     * conventional PCI root complex, this field is meaningless.
     */
    PCIReqIDCache requester_id_cache;
    char name[64];
    PCIIORegion io_regions[PCI_NUM_REGIONS];
    AddressSpace bus_master_as;
    MemoryRegion bus_master_container_region;
    MemoryRegion bus_master_enable_region;

    /* do not access the following fields */
    PCIConfigReadFunc *config_read;
    PCIConfigWriteFunc *config_write;

    /* Legacy PCI VGA regions */
    MemoryRegion *vga_regions[QEMU_PCI_VGA_NUM_REGIONS];
    bool has_vga;

    /* Current IRQ levels. Used internally by the generic PCI code. */
    uint8_t irq_state;

    /* Capability bits */
    uint32_t cap_present;

    /* Offset of MSI-X capability in config space */
    uint8_t msix_cap;

    /* MSI-X entries */
    int msix_entries_nr;

    /* Space to store MSI-X table & pending bit array */
    uint8_t *msix_table;
    uint8_t *msix_pba;

    /* May be used by INTx or MSI during interrupt notification */
    void *irq_opaque;

    MSITriggerFunc *msi_trigger;
    MSIPrepareMessageFunc *msi_prepare_message;
    MSIxPrepareMessageFunc *msix_prepare_message;

    /* MemoryRegion container for msix exclusive BAR setup */
    MemoryRegion msix_exclusive_bar;
    /* Memory Regions for MSI-X table and pending bit entries. */
    MemoryRegion msix_table_mmio;
    MemoryRegion msix_pba_mmio;
    /* Reference-count for entries actually in use by driver. */
    unsigned *msix_entry_used;
    /* MSI-X function mask set or MSI-X disabled */
    bool msix_function_masked;
    /* Version id needed for VMState */
    int32_t version_id;

    /* Offset of MSI capability in config space */
    uint8_t msi_cap;

    /* PCI Express */
    PCIExpressDevice exp;

    /* SHPC */
    SHPCDevice *shpc;

    /* Location of option ROM */
    char *romfile;
    uint32_t romsize;
    bool has_rom;
    MemoryRegion rom;
    uint32_t rom_bar;

    /* INTx routing notifier */
    PCIINTxRoutingNotifier intx_routing_notifier;

    /* MSI-X notifiers */
    MSIVectorUseNotifier msix_vector_use_notifier;
    MSIVectorReleaseNotifier msix_vector_release_notifier;
    MSIVectorPollNotifier msix_vector_poll_notifier;

    /* SPDM */
    uint16_t spdm_port;

    /* DOE */
    DOECap doe_spdm;

    /* ID of standby device in net_failover pair */
    char *failover_pair_id;
    uint32_t acpi_index;

    char *sriov_pf;
};

static inline int pci_intx(PCIDevice *pci_dev)
{
    return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1;
}

static inline int pci_is_cxl(const PCIDevice *d)
{
    return d->cap_present & QEMU_PCIE_CAP_CXL;
}

static inline int pci_is_express(const PCIDevice *d)
{
    return d->cap_present & QEMU_PCI_CAP_EXPRESS;
}

static inline int pci_is_express_downstream_port(const PCIDevice *d)
{
    uint8_t type;

    if (!pci_is_express(d) || !d->exp.exp_cap) {
        return 0;
    }

    type = pcie_cap_get_type(d);

    return type == PCI_EXP_TYPE_DOWNSTREAM || type == PCI_EXP_TYPE_ROOT_PORT;
}

static inline int pci_is_vf(const PCIDevice *d)
{
    return d->sriov_pf || d->exp.sriov_vf.pf != NULL;
}

static inline uint32_t pci_config_size(const PCIDevice *d)
{
    return pci_is_express(d) ? PCIE_CONFIG_SPACE_SIZE : PCI_CONFIG_SPACE_SIZE;
}

static inline uint16_t pci_get_bdf(PCIDevice *dev)
{
    return PCI_BUILD_BDF(pci_bus_num(pci_get_bus(dev)), dev->devfn);
}

static inline void pci_set_power(PCIDevice *pci_dev, bool state)
{
    /*
     * Don't change the enabled state of VFs when powering on/off the device.
     *
     * When powering on, VFs must not be enabled immediately; they must wait
     * until the guest configures SR-IOV.
     * When powering off, the corresponding PF will be reset, which disables
     * its VFs.
     */
    if (!pci_is_vf(pci_dev)) {
        pci_set_enabled(pci_dev, state);
    }
}

uint16_t pci_requester_id(PCIDevice *dev);

/* DMA access functions */
static inline AddressSpace *pci_get_address_space(PCIDevice *dev)
{
    return &dev->bus_master_as;
}

/**
 * pci_dma_rw: Read from or write to an address space from a PCI device.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @dev: #PCIDevice doing the memory access
 * @addr: address within the #PCIDevice address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @dir: indicates the transfer direction
 * @attrs: memory transaction attributes
 */
static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
                                     void *buf, dma_addr_t len,
                                     DMADirection dir, MemTxAttrs attrs)
{
    return dma_memory_rw(pci_get_address_space(dev), addr, buf, len,
                         dir, attrs);
}

/**
 * pci_dma_read: Read from an address space from a PCI device.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault). Called within an RCU critical section.
 *
 * @dev: #PCIDevice doing the memory access
 * @addr: address within the #PCIDevice address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult pci_dma_read(PCIDevice *dev, dma_addr_t addr,
                                       void *buf, dma_addr_t len)
{
    return pci_dma_rw(dev, addr, buf, len,
                      DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
}

/**
 * pci_dma_write: Write to an address space from a PCI device.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @dev: #PCIDevice doing the memory access
 * @addr: address within the #PCIDevice address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
                                        const void *buf, dma_addr_t len)
{
    return pci_dma_rw(dev, addr, (void *) buf, len,
                      DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
}
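/*
 * Illustrative sketch only (not part of this header): a device model
 * typically uses pci_dma_read()/pci_dma_write() to move data between guest
 * memory and its own state. "FooDevState", "FooDesc" and the field names
 * below are hypothetical.
 *
 *     static void foodev_process_desc(FooDevState *s, dma_addr_t desc_addr)
 *     {
 *         FooDesc desc;
 *
 *         // Fetch a descriptor from guest memory; bail out on a DMA fault.
 *         if (pci_dma_read(PCI_DEVICE(s), desc_addr,
 *                          &desc, sizeof(desc)) != MEMTX_OK) {
 *             return;
 *         }
 *
 *         // ... handle the descriptor, then write back a completion status.
 *         desc.status = 1;
 *         pci_dma_write(PCI_DEVICE(s), desc_addr, &desc, sizeof(desc));
 *     }
 */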
#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \
    static inline MemTxResult ld##_l##_pci_dma(PCIDevice *dev, \
                                               dma_addr_t addr, \
                                               uint##_bits##_t *val, \
                                               MemTxAttrs attrs) \
    { \
        return ld##_l##_dma(pci_get_address_space(dev), addr, val, attrs); \
    } \
    static inline MemTxResult st##_s##_pci_dma(PCIDevice *dev, \
                                               dma_addr_t addr, \
                                               uint##_bits##_t val, \
                                               MemTxAttrs attrs) \
    { \
        return st##_s##_dma(pci_get_address_space(dev), addr, val, attrs); \
    }

PCI_DMA_DEFINE_LDST(ub, b, 8)
PCI_DMA_DEFINE_LDST(uw_le, w_le, 16)
PCI_DMA_DEFINE_LDST(l_le, l_le, 32)
PCI_DMA_DEFINE_LDST(q_le, q_le, 64)
PCI_DMA_DEFINE_LDST(uw_be, w_be, 16)
PCI_DMA_DEFINE_LDST(l_be, l_be, 32)
PCI_DMA_DEFINE_LDST(q_be, q_be, 64)

#undef PCI_DMA_DEFINE_LDST

/**
 * pci_dma_map: Map device PCI address space range into host virtual address
 * @dev: #PCIDevice to be accessed
 * @addr: address within that device's address space
 * @plen: pointer to length of buffer; updated on return to indicate
 *        if only a subset of the requested range has been mapped
 * @dir: indicates the transfer direction
 *
 * Return: A host pointer, or %NULL if the resources needed to
 *         perform the mapping are exhausted (in that case *@plen
 *         is set to zero).
 */
static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
                                dma_addr_t *plen, DMADirection dir)
{
    return dma_memory_map(pci_get_address_space(dev), addr, plen, dir,
                          MEMTXATTRS_UNSPECIFIED);
}

static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len,
                                 DMADirection dir, dma_addr_t access_len)
{
    dma_memory_unmap(pci_get_address_space(dev), buffer, len, dir, access_len);
}

static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev,
                                       int alloc_hint)
{
    qemu_sglist_init(qsg, DEVICE(dev), alloc_hint, pci_get_address_space(dev));
}

extern const VMStateDescription vmstate_pci_device;

#define VMSTATE_PCI_DEVICE(_field, _state) {                         \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(PCIDevice),                                 \
    .vmsd       = &vmstate_pci_device,                               \
    .flags      = VMS_STRUCT,                                        \
    .offset     = vmstate_offset_value(_state, _field, PCIDevice),   \
}

#define VMSTATE_PCI_DEVICE_POINTER(_field, _state) {                 \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(PCIDevice),                                 \
    .vmsd       = &vmstate_pci_device,                               \
    .flags      = VMS_STRUCT | VMS_POINTER,                          \
    .offset     = vmstate_offset_pointer(_state, _field, PCIDevice), \
}
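/*
 * Illustrative sketch only (not part of this header): a device's
 * VMStateDescription usually embeds the generic PCI device state via
 * VMSTATE_PCI_DEVICE(). "FooDevState" and its fields are hypothetical.
 *
 *     static const VMStateDescription vmstate_foodev = {
 *         .name = "foodev",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (const VMStateField[]) {
 *             VMSTATE_PCI_DEVICE(parent_obj, FooDevState),
 *             VMSTATE_UINT32(some_reg, FooDevState),
 *             VMSTATE_END_OF_LIST()
 *         },
 *     };
 */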
#endif