/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H

#include <linux/pci.h>

/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256

#define PCI_FIND_CAP_TTL	48

#define PCI_VSEC_ID_INTEL_TBT	0x1234	/* Thunderbolt */

extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;

bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
bool pcie_cap_has_rtctl(const struct pci_dev *dev);

/* Functions internal to the PCI core code */

int pci_create_sysfs_dev_files(struct pci_dev *pdev);
void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
#if !defined(CONFIG_DMI) && !defined(CONFIG_ACPI)
static inline void pci_create_firmware_label_files(struct pci_dev *pdev)
{ return; }
static inline void pci_remove_firmware_label_files(struct pci_dev *pdev)
{ return; }
#else
void pci_create_firmware_label_files(struct pci_dev *pdev);
void pci_remove_firmware_label_files(struct pci_dev *pdev);
#endif
void pci_cleanup_rom(struct pci_dev *dev);

enum pci_mmap_api {
	PCI_MMAP_SYSFS,		/* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
	PCI_MMAP_PROCFS		/* mmap on /proc/bus/pci/<BDF> */
};
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
		  enum pci_mmap_api mmap_api);

int pci_probe_reset_function(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);

#define PCI_PM_D2_DELAY		200
#define PCI_PM_D3_WAIT		10
#define PCI_PM_D3COLD_WAIT	100
#define PCI_PM_BUS_WAIT		50

/**
 * struct pci_platform_pm_ops - Firmware PM callbacks
 *
 * @bridge_d3: Does the bridge allow entering into D3
 *
 * @is_manageable: returns 'true' if given device is power manageable by the
 *		   platform firmware
 *
 * @set_state: invokes the platform firmware to set the device's power state
 *
 * @get_state: queries the platform firmware for a device's current power state
 *
 * @refresh_state: asks the platform to refresh the device's power state data
 *
 * @choose_state: returns PCI power state of given device preferred by the
 *		  platform; to be used during system-wide transitions from a
 *		  sleeping state to the working state and vice versa
 *
 * @set_wakeup: enables/disables wakeup capability for the device
 *
 * @need_resume: returns 'true' if the given device (which is currently
 *		 suspended) needs to be resumed to be configured for system
 *		 wakeup.
 *
 * If given platform is generally capable of power managing PCI devices, all of
 * these callbacks are mandatory.
 */
struct pci_platform_pm_ops {
	bool (*bridge_d3)(struct pci_dev *dev);
	bool (*is_manageable)(struct pci_dev *dev);
	int (*set_state)(struct pci_dev *dev, pci_power_t state);
	pci_power_t (*get_state)(struct pci_dev *dev);
	void (*refresh_state)(struct pci_dev *dev);
	pci_power_t (*choose_state)(struct pci_dev *dev);
	int (*set_wakeup)(struct pci_dev *dev, bool enable);
	bool (*need_resume)(struct pci_dev *dev);
};

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
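
/*
 * Illustrative sketch (not part of this header): a platform that can power
 * manage PCI devices fills in all of the callbacks above and registers them
 * once at init time, as ACPI does with acpi_pci_platform_pm in pci-acpi.c.
 * The foo_* names below are hypothetical:
 *
 *	static const struct pci_platform_pm_ops foo_pci_platform_pm = {
 *		.bridge_d3	= foo_pci_bridge_d3,
 *		.is_manageable	= foo_pci_power_manageable,
 *		.set_state	= foo_pci_set_power_state,
 *		.get_state	= foo_pci_get_power_state,
 *		.refresh_state	= foo_pci_refresh_power_state,
 *		.choose_state	= foo_pci_choose_state,
 *		.set_wakeup	= foo_pci_wakeup,
 *		.need_resume	= foo_pci_need_resume,
 *	};
 *
 *	pci_set_platform_pm(&foo_pci_platform_pm);
 */
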
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
void pci_refresh_power_state(struct pci_dev *dev);
int pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
void pci_pme_restore(struct pci_dev *dev);
bool pci_dev_need_resume(struct pci_dev *dev);
void pci_dev_adjust_pme(struct pci_dev *dev);
void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);

static inline void pci_wakeup_event(struct pci_dev *dev)
{
	/* Wait 100 ms before the system can be put into a sleep state. */
	pm_wakeup_event(&dev->dev, 100);
}

static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
{
	return !!(pci_dev->subordinate);
}

static inline bool pci_power_manageable(struct pci_dev *pci_dev)
{
	/*
	 * Currently we allow normal PCI devices and PCI bridges to
	 * transition into D3 if their bridge_d3 is set.
	 */
	return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3;
}

static inline bool pcie_downstream_port(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

int pci_vpd_init(struct pci_dev *dev);
void pci_vpd_release(struct pci_dev *dev);
void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev);
void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev);

/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
void pci_restore_vc_state(struct pci_dev *dev);
void pci_allocate_vc_save_buffers(struct pci_dev *dev);

/* PCI /proc functions */
#ifdef CONFIG_PROC_FS
int pci_proc_attach_device(struct pci_dev *dev);
int pci_proc_detach_device(struct pci_dev *dev);
int pci_proc_detach_bus(struct pci_bus *bus);
#else
static inline int pci_proc_attach_device(struct pci_dev *dev) { return 0; }
static inline int pci_proc_detach_device(struct pci_dev *dev) { return 0; }
static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
#endif

/* Functions for PCI Hotplug drivers to use */
int pci_hp_add_bridge(struct pci_dev *dev);

#ifdef HAVE_PCI_LEGACY
void pci_create_legacy_files(struct pci_bus *bus);
void pci_remove_legacy_files(struct pci_bus *bus);
#else
static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
#endif

/* Lock for read/write access to pci device and bus lists */
extern struct rw_semaphore pci_bus_sem;
extern struct mutex pci_slot_mutex;

extern raw_spinlock_t pci_lock;

extern unsigned int pci_pm_d3_delay;

#ifdef CONFIG_PCI_MSI
void pci_no_msi(void);
#else
static inline void pci_no_msi(void) { }
#endif

static inline void pci_msi_set_enable(struct pci_dev *dev, int enable)
{
	u16 control;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_ENABLE;
	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
{
	u16 ctrl;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	ctrl &= ~clear;
	ctrl |= set;
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}

void pci_realloc_get_opt(char *);

static inline int pci_no_d1d2(struct pci_dev *dev)
{
	unsigned int parent_dstates = 0;

	if (dev->bus->self)
		parent_dstates = dev->bus->self->no_d1d2;
	return (dev->no_d1d2 || parent_dstates);
}

extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pcibus_groups[];
extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];

extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mmio_size;
extern unsigned long pci_hotplug_mmio_pref_size;
extern unsigned long pci_hotplug_bus_size;

/**
 * pci_match_one_device - Tell if a PCI device structure has a matching
 *			  PCI device id structure
 * @id: single PCI device id structure to match
 * @dev: the PCI device structure to match against
 *
 * Returns the matching pci_device_id structure or %NULL if there is no match.
 */
static inline const struct pci_device_id *
pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
{
	if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
	    (id->device == PCI_ANY_ID || id->device == dev->device) &&
	    (id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) &&
	    (id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device) &&
	    !((id->class ^ dev->class) & id->class_mask))
		return id;
	return NULL;
}
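
/*
 * Illustrative sketch (hypothetical table and caller): matching a device
 * against an ID table by hand looks roughly like the loop below; the driver
 * core does the equivalent in pci_match_id():
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID) },
 *		{ 0, }
 *	};
 *
 *	const struct pci_device_id *id;
 *
 *	for (id = foo_ids; id->vendor || id->subvendor || id->class_mask; id++)
 *		if (pci_match_one_device(id, pdev))
 *			return id;
 */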

/* PCI slot sysfs helper code */
#define to_pci_slot(s) container_of(s, struct pci_slot, kobj)

extern struct kset *pci_slots_kset;

struct pci_slot_attribute {
	struct attribute attr;
	ssize_t (*show)(struct pci_slot *, char *);
	ssize_t (*store)(struct pci_slot *, const char *, size_t);
};
#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)

enum pci_bar_type {
	pci_bar_unknown,	/* Standard PCI BAR probe */
	pci_bar_io,		/* An I/O port BAR */
	pci_bar_mem32,		/* A 32-bit memory BAR */
	pci_bar_mem64,		/* A 64-bit memory BAR */
};

struct device *pci_get_host_bridge_device(struct pci_dev *dev);
void pci_put_host_bridge_device(struct device *dev);

int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
				int crs_timeout);
bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
					int crs_timeout);
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);

int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int reg);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
			    struct list_head *realloc_head);
void __pci_bus_assign_resources(const struct pci_bus *bus,
				struct list_head *realloc_head,
				struct list_head *fail_head);
bool pci_bus_clip_resource(struct pci_dev *dev, int idx);

void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
struct pci_bus *pci_bus_get(struct pci_bus *bus);
void pci_bus_put(struct pci_bus *bus);

/* PCIe link information */
#define PCIE_SPEED2STR(speed) \
	((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \
	 (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \
	 (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \
	 (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \
	 "Unknown speed")

/* PCIe speed to Mb/s reduced by encoding overhead */
#define PCIE_SPEED2MBS_ENC(speed) \
	((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
	 (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \
	 (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \
	 (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \
	 0)
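
/*
 * Worked example for the macro above (informational): PCIe 3.0 signals at
 * 8.0 GT/s but uses 128b/130b line encoding, so the usable data rate per
 * lane is 8000 * 128/130 ~= 7877 Mb/s.  PCIe 1.x/2.x use 8b/10b encoding,
 * so 2.5 GT/s yields 2500 * 8/10 = 2000 Mb/s and 5.0 GT/s yields 4000 Mb/s
 * per lane.
 */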

enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
			   enum pcie_link_width *width);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
void pcie_report_downtraining(struct pci_dev *dev);
void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);

/* Single Root I/O Virtualization */
struct pci_sriov {
	int		pos;		/* Capability position */
	int		nres;		/* Number of resources */
	u32		cap;		/* SR-IOV Capabilities */
	u16		ctrl;		/* SR-IOV Control */
	u16		total_VFs;	/* Total VFs associated with the PF */
	u16		initial_VFs;	/* Initial VFs associated with the PF */
	u16		num_VFs;	/* Number of VFs available */
	u16		offset;		/* First VF Routing ID offset */
	u16		stride;		/* Following VF stride */
	u16		vf_device;	/* VF device ID */
	u32		pgsz;		/* Page size for BAR alignment */
	u8		link;		/* Function Dependency Link */
	u8		max_VF_buses;	/* Max buses consumed by VFs */
	u16		driver_max_VFs;	/* Max num VFs driver supports */
	struct pci_dev	*dev;		/* Lowest numbered PF */
	struct pci_dev	*self;		/* This PF */
	u32		class;		/* VF device class */
	u8		hdr_type;	/* VF header type */
	u16		subsystem_vendor; /* VF subsystem vendor */
	u16		subsystem_device; /* VF subsystem device */
	resource_size_t	barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
	bool		drivers_autoprobe; /* Auto probing of VFs by driver */
};
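
/*
 * Informational sketch: the offset/stride fields above encode where VFs
 * appear in config space.  A VF's routing ID is derived from the PF as
 * PF routing ID + offset + vf_id * stride (see pci_iov_virtfn_bus() and
 * pci_iov_virtfn_devfn() in iov.c).  For example, a PF at 00:00.0 with
 * offset 0x80 and stride 0x2 puts VF0 at devfn 0x80 (00:10.0) and VF1 at
 * devfn 0x82 (00:10.2); sums beyond 0xff spill onto the following bus,
 * which is what max_VF_buses and pci_iov_bus_range() account for.
 */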

/**
 * pci_dev_set_io_state - Set the new error state if possible.
 *
 * @dev: PCI device to set new error_state
 * @new: the state we want dev to be in
 *
 * Must be called with device_lock held.
 *
 * Returns true if state has been changed to the requested state.
 */
static inline bool pci_dev_set_io_state(struct pci_dev *dev,
					pci_channel_state_t new)
{
	bool changed = false;

	device_lock_assert(&dev->dev);
	switch (new) {
	case pci_channel_io_perm_failure:
		switch (dev->error_state) {
		case pci_channel_io_frozen:
		case pci_channel_io_normal:
		case pci_channel_io_perm_failure:
			changed = true;
			break;
		}
		break;
	case pci_channel_io_frozen:
		switch (dev->error_state) {
		case pci_channel_io_frozen:
		case pci_channel_io_normal:
			changed = true;
			break;
		}
		break;
	case pci_channel_io_normal:
		switch (dev->error_state) {
		case pci_channel_io_frozen:
		case pci_channel_io_normal:
			changed = true;
			break;
		}
		break;
	}
	if (changed)
		dev->error_state = new;
	return changed;
}

static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
{
	device_lock(&dev->dev);
	pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
	device_unlock(&dev->dev);

	return 0;
}

static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
	return dev->error_state == pci_channel_io_perm_failure;
}

/* pci_dev priv_flags */
#define PCI_DEV_ADDED 0

static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
{
	assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
}

static inline bool pci_dev_is_added(const struct pci_dev *dev)
{
	return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
}

#ifdef CONFIG_PCIEAER
#include <linux/aer.h>

#define AER_MAX_MULTI_ERR_DEVICES 5	/* Not likely to have more */

struct aer_err_info {
	struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
	int error_dev_num;

	unsigned int id:16;

	unsigned int severity:2;	/* 0:NONFATAL | 1:FATAL | 2:COR */
	unsigned int __pad1:5;
	unsigned int multi_error_valid:1;

	unsigned int first_error:5;
	unsigned int __pad2:2;
	unsigned int tlp_header_valid:1;

	unsigned int status;		/* COR/UNCOR Error Status */
	unsigned int mask;		/* COR/UNCOR Error Mask */
	struct aer_header_log_regs tlp;	/* TLP Header */
};

int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
#endif	/* CONFIG_PCIEAER */

#ifdef CONFIG_PCIE_DPC
void pci_save_dpc_state(struct pci_dev *dev);
void pci_restore_dpc_state(struct pci_dev *dev);
#else
static inline void pci_save_dpc_state(struct pci_dev *dev) {}
static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
#endif

#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
void pci_ats_init(struct pci_dev *dev);
void pci_restore_ats_state(struct pci_dev *dev);
#else
static inline void pci_ats_init(struct pci_dev *d) { }
static inline void pci_restore_ats_state(struct pci_dev *dev) { }
#endif /* CONFIG_PCI_ATS */

#ifdef CONFIG_PCI_PRI
void pci_pri_init(struct pci_dev *dev);
void pci_restore_pri_state(struct pci_dev *pdev);
#else
static inline void pci_pri_init(struct pci_dev *dev) { }
static inline void pci_restore_pri_state(struct pci_dev *pdev) { }
#endif

#ifdef CONFIG_PCI_PASID
void pci_pasid_init(struct pci_dev *dev);
void pci_restore_pasid_state(struct pci_dev *pdev);
#else
static inline void pci_pasid_init(struct pci_dev *dev) { }
static inline void pci_restore_pasid_state(struct pci_dev *pdev) { }
#endif

#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
void pci_iov_remove(struct pci_dev *dev);
void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
extern const struct attribute_group sriov_dev_attr_group;
#else
static inline int pci_iov_init(struct pci_dev *dev)
{
	return -ENODEV;
}
static inline void pci_iov_release(struct pci_dev *dev)
{
}
static inline void pci_iov_remove(struct pci_dev *dev)
{
}
static inline void pci_restore_iov_state(struct pci_dev *dev)
{
}
static inline int pci_iov_bus_range(struct pci_bus *bus)
{
	return 0;
}
#endif /* CONFIG_PCI_IOV */

unsigned long pci_cardbus_resource_alignment(struct resource *);

static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
						     struct resource *res)
{
#ifdef CONFIG_PCI_IOV
	int resno = res - dev->resource;

	if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
		return pci_sriov_resource_alignment(dev, resno);
#endif
	if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
		return pci_cardbus_resource_alignment(res);
	return resource_alignment(res);
}

void pci_enable_acs(struct pci_dev *dev);
#ifdef CONFIG_PCI_QUIRKS
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
int pci_dev_specific_enable_acs(struct pci_dev *dev);
int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
#else
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
					       u16 acs_flags)
{
	return -ENOTTY;
}
static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
{
	return -ENOTTY;
}
static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
{
	return -ENOTTY;
}
#endif

/* PCI error reporting and recovery */
void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
		      u32 service);

bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif

#ifdef CONFIG_PCIE_ECRC
void pcie_set_ecrc_checking(struct pci_dev *dev);
void pcie_ecrc_get_policy(char *str);
#else
static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif

#ifdef CONFIG_PCIE_PTM
void pci_ptm_init(struct pci_dev *dev);
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
#else
static inline void pci_ptm_init(struct pci_dev *dev) { }
static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{ return -EINVAL; }
#endif
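
/*
 * Illustrative sketch (hypothetical caller): an endpoint driver that wants
 * Precision Time Measurement would enable it from its probe path roughly
 * like this, while pci_ptm_init() is invoked by the core during enumeration:
 *
 *	if (pci_enable_ptm(pdev, NULL))
 *		dev_info(&pdev->dev, "PTM not supported by the link\n");
 */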

struct pci_dev_reset_methods {
	u16 vendor;
	u16 device;
	int (*reset)(struct pci_dev *dev, int probe);
};

#ifdef CONFIG_PCI_QUIRKS
int pci_dev_specific_reset(struct pci_dev *dev, int probe);
#else
static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
	return -ENOTTY;
}
#endif

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
			  struct resource *res);
#endif

u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
static inline u64 pci_rebar_size_to_bytes(int size)
{
	return 1ULL << (size + 20);
}

struct device_node;

#ifdef CONFIG_OF
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
void pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
#else
static inline int
of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
	return -EINVAL;
}

static inline int
of_get_pci_domain_nr(struct device_node *node)
{
	return -1;
}

static inline int
of_pci_get_max_link_speed(struct device_node *node)
{
	return -EINVAL;
}

static inline void pci_set_of_node(struct pci_dev *dev) { }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
#endif /* CONFIG_OF */

#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);
void pci_aer_exit(struct pci_dev *dev);
extern const struct attribute_group aer_stats_attr_group;
void pci_aer_clear_fatal_status(struct pci_dev *dev);
void pci_aer_clear_device_status(struct pci_dev *dev);
#else
static inline void pci_no_aer(void) { }
static inline void pci_aer_init(struct pci_dev *d) { }
static inline void pci_aer_exit(struct pci_dev *d) { }
static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
#endif

#ifdef CONFIG_ACPI
int pci_acpi_program_hp_params(struct pci_dev *dev);
#else
static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
{
	return -ENODEV;
}
#endif

#ifdef CONFIG_PCIEASPM
extern const struct attribute_group aspm_ctrl_attr_group;
#endif

#endif /* DRIVERS_PCI_H */