Lines matching +defs:dev +defs:all in the Linux PCI quirks file (drivers/pci/quirks.c); each entry below is prefixed with its line number in that file.

84 int pcie_failed_link_retrain(struct pci_dev *dev)
93 if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) ||
94 !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
97 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
98 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
103 pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
107 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
109 ret = pcie_retrain_link(dev, false);
111 pci_info(dev, "retraining failed\n");
112 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2,
114 pcie_retrain_link(dev, true);
118 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
123 pci_match_id(ids, dev)) {
126 pci_info(dev, "removing 2.5GT/s downstream link speed restriction\n");
127 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
130 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
132 ret = pcie_retrain_link(dev, false);
134 pci_info(dev, "retraining failed\n");
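The fragments above are from pcie_failed_link_retrain(), which pins a misbehaving downstream link to 2.5GT/s and later lifts that cap. A minimal sketch of the LNKCTL2 read-modify-write it is built around, using the standard Target Link Speed field from pci_regs.h (pcie_retrain_link() is a PCI-core internal declared in drivers/pci/pci.h; the retry logic and LNKSTA checks are omitted here):

#include <linux/pci.h>
#include <uapi/linux/pci_regs.h>

/* Sketch only: clamp the target link speed to 2.5GT/s and retrain. */
static int clamp_link_to_2_5gt(struct pci_dev *dev)
{
	u16 lnkctl2;

	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
	lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;	/* clear Target Link Speed */
	lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT;	/* force Gen1 speed */
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);

	/* Second argument selects which link-status bit to poll; see pcie_retrain_link(). */
	return pcie_retrain_link(dev, false);
}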
142 static ktime_t fixup_debug_start(struct pci_dev *dev,
143 void (*fn)(struct pci_dev *dev))
146 pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current));
151 static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
152 void (*fn)(struct pci_dev *dev))
161 pci_info(dev, "%pS took %lld usecs\n", fn, duration);
164 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
170 if ((f->class == (u32) (dev->class >> f->class_shift) ||
172 (f->vendor == dev->vendor ||
174 (f->device == dev->device ||
176 void (*hook)(struct pci_dev *dev);
182 calltime = fixup_debug_start(dev, hook);
183 hook(dev);
184 fixup_debug_report(dev, calltime, hook);
207 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
258 pci_do_fixups(dev, start, end);
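For context on pci_do_fixups()/pci_fixup_device() above: each quirk in this file is registered with one of the DECLARE_PCI_FIXUP_* macros, which drops a struct pci_fixup entry into a per-pass linker section that pci_fixup_device() scans. A hedged sketch of a typical registration; the vendor/device IDs below are placeholders, not taken from the listing:

#include <linux/pci.h>

/* Placeholder IDs for illustration only. */
#define EXAMPLE_VENDOR_ID	0xdead
#define EXAMPLE_DEVICE_ID	0xbeef

static void example_final_quirk(struct pci_dev *dev)
{
	pci_info(dev, "example quirk: disabling MSI\n");
	dev->no_msi = 1;
}
/* Runs in the pci_fixup_final pass, e.g. via the for_each_pci_dev() loop shown just below. */
DECLARE_PCI_FIXUP_FINAL(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID, example_final_quirk);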
264 struct pci_dev *dev = NULL;
272 for_each_pci_dev(dev) {
273 pci_fixup_device(pci_fixup_final, dev);
276 * value shared by all PCI devices. If there's a
280 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
286 pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
307 * we need to set the dev->mmio_always_on bit.
309 static void quirk_mmio_always_on(struct pci_dev *dev)
311 dev->mmio_always_on = 1;
327 static void quirk_passive_release(struct pci_dev *dev)
357 static void quirk_isa_dma_hangs(struct pci_dev *dev)
361 pci_info(dev, "Activating ISA DMA hang workarounds\n");
382 static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
387 pci_read_config_dword(dev, 0x40, &pmbase);
392 pci_info(dev, FW_BUG "Tiger Point LPC.BM_STS cleared\n");
400 static void quirk_nopcipci(struct pci_dev *dev)
403 pci_info(dev, "Disabling direct PCI/PCI transfers\n");
410 static void quirk_nopciamd(struct pci_dev *dev)
413 pci_read_config_byte(dev, 0x08, &rev);
416 pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
423 static void quirk_triton(struct pci_dev *dev)
426 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
445 static void quirk_vialatency(struct pci_dev *dev)
486 pci_read_config_byte(dev, 0x76, &busarb);
494 pci_write_config_byte(dev, 0x76, busarb);
495 pci_info(dev, "Applying VIA southbridge workaround\n");
508 static void quirk_viaetbf(struct pci_dev *dev)
511 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
517 static void quirk_vsfx(struct pci_dev *dev)
520 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
531 static void quirk_alimagik(struct pci_dev *dev)
534 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
542 static void quirk_natoma(struct pci_dev *dev)
545 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
560 static void quirk_citrine(struct pci_dev *dev)
562 dev->cfg_size = 0xA0;
570 static void quirk_nfp6000(struct pci_dev *dev)
572 dev->cfg_size = 0x600;
580 static void quirk_extend_bar_to_page(struct pci_dev *dev)
585 struct resource *r = &dev->resource[i];
591 pci_info(dev, "expanded BAR %d to page size: %pR\n",
602 static void quirk_s3_64M(struct pci_dev *dev)
604 struct resource *r = &dev->resource[0];
615 static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
620 struct resource *res = dev->resource + pos;
622 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
627 res->name = pci_name(dev);
636 pcibios_bus_to_resource(dev->bus, res, &bus_region);
638 pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
651 static void quirk_cs5536_vsa(struct pci_dev *dev)
655 if (pci_resource_len(dev, 0) != 8) {
656 quirk_io(dev, 0, 8, name); /* SMB */
657 quirk_io(dev, 1, 256, name); /* GPIO */
658 quirk_io(dev, 2, 64, name); /* MFGPT */
659 pci_info(dev, "%s bug detected (incorrect header); workaround applied\n",
665 static void quirk_io_region(struct pci_dev *dev, int port,
670 struct resource *res = dev->resource + nr;
672 pci_read_config_word(dev, port, &region);
678 res->name = pci_name(dev);
684 pcibios_bus_to_resource(dev->bus, res, &bus_region);
686 if (!pci_claim_resource(dev, nr))
687 pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
694 static void quirk_ati_exploding_mce(struct pci_dev *dev)
696 pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
766 static void quirk_ali7101_acpi(struct pci_dev *dev)
768 quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
769 quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
773 static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
778 pci_read_config_dword(dev, port, &devres);
796 pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
799 static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
804 pci_read_config_dword(dev, port, &devres);
822 pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
831 static void quirk_piix4_acpi(struct pci_dev *dev)
835 quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
836 quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
839 pci_read_config_dword(dev, 0x5c, &res_a);
841 piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
842 piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
848 piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
849 piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
853 piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
854 piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
856 piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
857 piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
878 static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
889 pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
891 quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
894 pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
896 quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
910 static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
914 pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
916 quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
919 pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
921 quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
925 static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned int reg,
931 pci_read_config_dword(dev, reg, &val);
954 pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
957 static void quirk_ich6_lpc(struct pci_dev *dev)
959 /* Shared ACPI/GPIO decode with all ICH6+ */
960 ich6_lpc_acpi_gpio(dev);
963 ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
964 ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
969 static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned int reg,
975 pci_read_config_dword(dev, reg, &val);
990 pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
994 static void quirk_ich7_lpc(struct pci_dev *dev)
997 ich6_lpc_acpi_gpio(dev);
1000 ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
1001 ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
1002 ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
1003 ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
1023 static void quirk_vt82c586_acpi(struct pci_dev *dev)
1025 if (dev->revision & 0x10)
1026 quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
1037 static void quirk_vt82c686_acpi(struct pci_dev *dev)
1039 quirk_vt82c586_acpi(dev);
1041 quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
1044 quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
1053 static void quirk_vt8235_acpi(struct pci_dev *dev)
1055 quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
1056 quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
1064 static void quirk_xio2000a(struct pci_dev *dev)
1069 pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
1070 list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
1084 * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
1090 static void quirk_via_ioapic(struct pci_dev *dev)
1097 tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
1099 pci_info(dev, "%s VIA external APIC routing\n",
1103 pci_write_config_byte(dev, 0x58, tmp);
1114 static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
1119 pci_read_config_byte(dev, 0x5B, &misc_control2);
1121 pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
1122 pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
1130 * We check all revs >= B0 (yet not in the pre production!) as the bug
1137 static void quirk_amd_ioapic(struct pci_dev *dev)
1139 if (dev->revision >= 0x02) {
1140 pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
1141 pci_warn(dev, " : booting with the \"noapic\" option\n");
1149 static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
1152 if (dev->subsystem_device == 0xa118)
1153 dev->sriov->link = dev->devfn;
1162 static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
1164 if (dev->subordinate && dev->revision <= 0x12) {
1165 pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
1166 dev->revision);
1167 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
1175 * at all. Therefore it seems like setting the pci_dev's IRQ to the value
1195 static void quirk_via_bridge(struct pci_dev *dev)
1198 switch (dev->device) {
1205 via_vlink_dev_lo = PCI_SLOT(dev->devfn);
1206 via_vlink_dev_hi = PCI_SLOT(dev->devfn);
1234 * @dev: PCI device
1243 static void quirk_via_vlink(struct pci_dev *dev)
1247 /* Check if we have VLink at all */
1251 new_irq = dev->irq;
1258 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
1259 PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
1266 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1268 pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
1271 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
1281 static void quirk_vt82c598_id(struct pci_dev *dev)
1283 pci_write_config_byte(dev, 0xfc, 0);
1284 pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
1294 static void quirk_cardbus_legacy(struct pci_dev *dev)
1296 pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
1310 static void quirk_amd_ordering(struct pci_dev *dev)
1313 pci_read_config_dword(dev, 0x4C, &pcic);
1316 pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
1317 pci_write_config_dword(dev, 0x4C, pcic);
1318 pci_read_config_dword(dev, 0x84, &pcic);
1320 pci_write_config_dword(dev, 0x84, pcic);
1333 static void quirk_dunord(struct pci_dev *dev)
1335 struct resource *r = &dev->resource[1];
1348 static void quirk_transparent_bridge(struct pci_dev *dev)
1350 dev->transparent = 1;
1361 static void quirk_mediagx_master(struct pci_dev *dev)
1365 pci_read_config_byte(dev, 0x41, &reg);
1368 pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
1370 pci_write_config_byte(dev, 0x41, reg);
1475 static void quirk_eisa_bridge(struct pci_dev *dev)
1477 dev->class = PCI_CLASS_BRIDGE_EISA << 8;
1508 static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
1510 if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1511 if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
1512 switch (dev->subsystem_device) {
1519 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
1520 switch (dev->subsystem_device) {
1526 else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
1527 switch (dev->subsystem_device) {
1531 else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
1532 switch (dev->subsystem_device) {
1536 else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
1537 switch (dev->subsystem_device) {
1541 else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
1542 switch (dev->subsystem_device) {
1548 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1549 switch (dev->subsystem_device) {
1554 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1555 switch (dev->subsystem_device) {
1559 else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
1560 switch (dev->subsystem_device) {
1565 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
1566 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1567 switch (dev->subsystem_device) {
1572 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1573 switch (dev->subsystem_device) {
1579 else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
1580 switch (dev->subsystem_device) {
1584 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
1585 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1586 switch (dev->subsystem_device) {
1590 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
1591 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1592 switch (dev->subsystem_device) {
1596 else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
1597 switch (dev->subsystem_device) {
1604 else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
1605 switch (dev->subsystem_device) {
1616 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
1617 switch (dev->subsystem_device) {
1641 static void asus_hides_smbus_lpc(struct pci_dev *dev)
1648 pci_read_config_word(dev, 0xF2, &val);
1650 pci_write_config_word(dev, 0xF2, val & (~0x8));
1651 pci_read_config_word(dev, 0xF2, &val);
1653 pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
1656 pci_info(dev, "Enabled i801 SMBus device\n");
1676 static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
1684 pci_read_config_dword(dev, 0xF0, &rcba);
1691 static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
1705 static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
1712 pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
1715 static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
1717 asus_hides_smbus_lpc_ich6_suspend(dev);
1718 asus_hides_smbus_lpc_ich6_resume_early(dev);
1719 asus_hides_smbus_lpc_ich6_resume(dev);
1727 static void quirk_sis_96x_smbus(struct pci_dev *dev)
1730 pci_read_config_byte(dev, 0x77, &val);
1732 pci_info(dev, "Enabling SiS 96x SMBus\n");
1733 pci_write_config_byte(dev, 0x77, val & ~0x10);
1755 static void quirk_sis_503(struct pci_dev *dev)
1760 pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg);
1761 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
1762 pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
1764 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
1773 dev->device = devid;
1774 quirk_sis_96x_smbus(dev);
1785 static void asus_hides_ac97_lpc(struct pci_dev *dev)
1790 if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1791 if (dev->device == PCI_DEVICE_ID_VIA_8237)
1798 pci_read_config_byte(dev, 0x50, &val);
1800 pci_write_config_byte(dev, 0x50, val & (~0xc0));
1801 pci_read_config_byte(dev, 0x50, &val);
1803 pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1806 pci_info(dev, "Enabled onboard AC97/MC97 devices\n");
1892 static void quirk_jmicron_async_suspend(struct pci_dev *dev)
1894 if (dev->multifunction) {
1895 device_disable_async_suspend(&dev->dev);
1896 pci_info(dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
1921 * The next five BARs all seem to be rubbish, so just clean
1930 static void quirk_no_msi(struct pci_dev *dev)
1932 pci_info(dev, "avoiding MSI to work around a hardware defect\n");
1933 dev->no_msi = 1;
1980 if (!pdev->dev.of_node &&
1981 device_create_managed_software_node(&pdev->dev, properties, NULL))
1995 static void quirk_pcie_pxh(struct pci_dev *dev)
1997 dev->no_msi = 1;
1998 pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n");
2010 static void quirk_intel_pcie_pm(struct pci_dev *dev)
2013 dev->no_d1d2 = 1;
2037 static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
2039 if (dev->d3hot_delay >= delay)
2042 dev->d3hot_delay = delay;
2043 pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
2044 dev->d3hot_delay);
2047 static void quirk_radeon_pm(struct pci_dev *dev)
2049 if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
2050 dev->subsystem_device == 0x00e2)
2051 quirk_d3hot_delay(dev, 20);
2058 * to previous effective default for all NVIDIA HDA controllers.
2060 static void quirk_nvidia_hda_pm(struct pci_dev *dev)
2062 quirk_d3hot_delay(dev, 20);
2077 static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
2079 quirk_d3hot_delay(dev, 20);
2115 static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
2121 dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
2122 pci_info(dev, "rerouting interrupts for [%04x:%04x]\n",
2123 dev->vendor, dev->device);
2167 static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
2175 switch (dev->device) {
2177 pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
2180 pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
2188 pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
2191 pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
2197 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2198 dev->vendor, dev->device);
2243 static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
2251 pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
2252 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
2260 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
2262 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2263 dev->vendor, dev->device);
2280 static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
2286 if ((dev->revision == AMD_813X_REV_B1) ||
2287 (dev->revision == AMD_813X_REV_B2))
2290 pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
2292 pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
2294 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2295 dev->vendor, dev->device);
2304 static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
2311 pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
2313 pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n",
2314 dev->vendor, dev->device);
2317 pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
2318 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2319 dev->vendor, dev->device);
2330 static void quirk_tc86c001_ide(struct pci_dev *dev)
2332 struct resource *r = &dev->resource[0];
2351 static void quirk_plx_pci9050(struct pci_dev *dev)
2356 if (dev->revision >= 2)
2359 if (pci_resource_len(dev, bar) == 0x80 &&
2360 (pci_resource_start(dev, bar) & 0x80)) {
2361 struct resource *r = &dev->resource[bar];
2362 pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
2383 static void quirk_netmos(struct pci_dev *dev)
2385 unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
2386 unsigned int num_serial = dev->subsystem_device & 0xf;
2398 switch (dev->device) {
2401 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
2402 dev->subsystem_device == 0x0299)
2410 pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
2411 dev->device, num_parallel, num_serial);
2412 dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
2413 (dev->class & 0xff);
2420 static void quirk_e100_interrupt(struct pci_dev *dev)
2426 switch (dev->device) {
2450 * disable all e100 interrupts here. The driver will
2453 pci_read_config_word(dev, PCI_COMMAND, &command);
2455 if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
2462 if (dev->pm_cap) {
2463 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2469 csr = ioremap(pci_resource_start(dev, 0), 8);
2471 pci_warn(dev, "Can't map e100 registers\n");
2477 pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n");
2490 static void quirk_disable_aspm_l0s(struct pci_dev *dev)
2492 pci_info(dev, "Disabling L0s\n");
2493 pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
2510 static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
2512 pci_info(dev, "Disabling ASPM L0s/L1\n");
2513 pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
2531 static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
2533 dev->clear_retrain_link = 1;
2534 pci_info(dev, "Enable PCIe Retrain Link quirk\n");
2540 static void fixup_rev1_53c810(struct pci_dev *dev)
2542 u32 class = dev->class;
2545 * rev 1 ncr53c810 chips don't set the class at all which means
2551 dev->class = PCI_CLASS_STORAGE_SCSI << 8;
2552 pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
2553 class, dev->class);
2558 static void quirk_p64h2_1k_io(struct pci_dev *dev)
2562 pci_read_config_word(dev, 0x40, &en1k);
2565 pci_info(dev, "Enable I/O Space to 1KB granularity\n");
2566 dev->io_window_1k = 1;
2576 static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
2580 if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
2582 pci_write_config_byte(dev, 0xf41, b | 0x20);
2583 pci_info(dev, "Linking AER extended capability\n");
2592 static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
2618 if (pci_read_config_byte(dev, 0x76, &b) == 0) {
2621 pci_write_config_byte(dev, 0x76, b ^ 0x40);
2623 pci_info(dev, "Disabling VIA CX700 PCI parking\n");
2627 if (pci_read_config_byte(dev, 0x72, &b) == 0) {
2630 pci_write_config_byte(dev, 0x72, 0x0);
2633 pci_write_config_byte(dev, 0x75, 0x1);
2636 pci_write_config_byte(dev, 0x77, 0x0);
2638 pci_info(dev, "Disabling VIA CX700 PCI caching\n");
2644 static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
2648 pci_read_config_dword(dev, 0xf4, &rev);
2652 int readrq = pcie_get_readrq(dev);
2654 pcie_set_readrq(dev, 2048);
2667 static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2671 if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
2672 pci_info(dev, "Enabling MCH 'Overflow' Device\n");
2673 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2686 * Instead of setting the flag on all buses in the machine, simply disable
2689 static void quirk_disable_all_msi(struct pci_dev *dev)
2692 pci_warn(dev, "MSI quirk detected; MSI disabled\n");
2705 static void quirk_disable_msi(struct pci_dev *dev)
2707 if (dev->subordinate) {
2708 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2709 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2740 static int msi_ht_cap_enabled(struct pci_dev *dev)
2744 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2748 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2750 pci_info(dev, "Found %s HT MSI Mapping\n",
2756 pos = pci_find_next_ht_capability(dev, pos,
2763 static void quirk_msi_ht_cap(struct pci_dev *dev)
2765 if (!msi_ht_cap_enabled(dev))
2766 quirk_disable_msi(dev);
2775 static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
2783 pdev = pci_get_slot(dev->bus, 0);
2787 quirk_msi_ht_cap(dev);
2794 static void ht_enable_msi_mapping(struct pci_dev *dev)
2798 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2802 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2804 pci_info(dev, "Enabling HT MSI Mapping\n");
2806 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2809 pos = pci_find_next_ht_capability(dev, pos,
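ht_enable_msi_mapping() above walks every HyperTransport MSI-mapping capability on the device and sets its enable bit. A reconstruction of that loop, assuming only the standard HT capability helpers and flag definitions from <linux/pci.h> (the companion disable path at line 3058 is the same loop with the bit cleared):

#include <linux/pci.h>

/* Sketch: enable every HT MSI mapping capability found on the device. */
static void ht_msi_mapping_enable_sketch(struct pci_dev *dev)
{
	int pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);

	while (pos) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) {
			pci_info(dev, "Enabling HT MSI Mapping\n");
			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags | HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}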
2824 static void nvenet_msi_disable(struct pci_dev *dev)
2831 pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n");
2832 dev->no_msi = 1;
2848 static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev)
2850 dev->no_msi = 1;
2906 * conventional systems where the IRQ is broadcast to all online CPUs. Not
2911 static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
2915 if (!pci_find_capability(dev, PCI_CAP_ID_HT))
2918 pci_read_config_dword(dev, 0x74, &cfg);
2923 pci_write_config_dword(dev, 0x74, cfg);
2933 static int ht_check_msi_mapping(struct pci_dev *dev)
2939 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2945 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2954 pos = pci_find_next_ht_capability(dev, pos,
2963 struct pci_dev *dev;
2970 dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
2971 if (!dev)
2975 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2977 pci_dev_put(dev);
2981 if (ht_check_msi_mapping(dev)) {
2983 pci_dev_put(dev);
2986 pci_dev_put(dev);
2995 static int is_end_of_ht_chain(struct pci_dev *dev)
3001 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
3006 pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
3010 pci_read_config_word(dev, pos + ctrl_off, &ctrl);
3019 static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
3026 dev_no = dev->devfn >> 3;
3028 host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
3044 if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
3052 ht_enable_msi_mapping(dev);
3058 static void ht_disable_msi_mapping(struct pci_dev *dev)
3062 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
3066 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
3068 pci_info(dev, "Disabling HT MSI Mapping\n");
3070 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
3073 pos = pci_find_next_ht_capability(dev, pos,
3078 static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
3088 found = ht_check_msi_mapping(dev);
3098 host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
3101 pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
3110 if (all)
3111 ht_enable_msi_mapping(dev);
3113 nv_ht_enable_msi_mapping(dev);
3123 ht_disable_msi_mapping(dev);
3129 static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
3131 return __nv_msi_ht_cap_quirk(dev, 1);
3136 static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
3138 return __nv_msi_ht_cap_quirk(dev, 0);
3143 static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
3145 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
3148 static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
3163 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
3167 static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
3170 if (dev->revision < 0x18) {
3171 pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n");
3172 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
3245 static void quirk_al_msi_disable(struct pci_dev *dev)
3247 dev->no_msi = 1;
3248 pci_warn(dev, "Disabling MSI/MSI-X\n");
3261 static void quirk_hotplug_bridge(struct pci_dev *dev)
3263 dev->is_hotplug_bridge = 1;
3293 static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
3304 if (PCI_FUNC(dev->devfn))
3307 pci_read_config_byte(dev, 0xB7, &disable);
3311 pci_read_config_byte(dev, 0x8E, &write_enable);
3312 pci_write_config_byte(dev, 0x8E, 0xAA);
3313 pci_read_config_byte(dev, 0x8D, &write_target);
3314 pci_write_config_byte(dev, 0x8D, 0xB7);
3315 pci_write_config_byte(dev, 0xB7, disable | 0x02);
3316 pci_write_config_byte(dev, 0x8E, write_enable);
3317 pci_write_config_byte(dev, 0x8D, write_target);
3319 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
3320 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
3325 static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
3335 if (PCI_FUNC(dev->devfn))
3349 if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
3350 dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
3351 pci_write_config_byte(dev, 0xf9, 0xfc);
3352 pci_write_config_byte(dev, 0x150, 0x10);
3353 pci_write_config_byte(dev, 0xf9, 0x00);
3354 pci_write_config_byte(dev, 0xfc, 0x01);
3355 pci_write_config_byte(dev, 0xe1, 0x32);
3356 pci_write_config_byte(dev, 0xfc, 0x00);
3358 pci_notice(dev, "MMC controller base frequency changed to 50Mhz.\n");
3361 pci_read_config_byte(dev, 0xCB, &disable);
3366 pci_read_config_byte(dev, 0xCA, &write_enable);
3367 pci_write_config_byte(dev, 0xCA, 0x57);
3368 pci_write_config_byte(dev, 0xCB, disable | 0x02);
3369 pci_write_config_byte(dev, 0xCA, write_enable);
3371 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
3372 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
3396 static void vtd_mask_spec_errors(struct pci_dev *dev)
3400 pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
3401 pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
3407 static void fixup_ti816x_class(struct pci_dev *dev)
3409 u32 class = dev->class;
3412 dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
3413 pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n",
3414 class, dev->class);
3423 static void fixup_mpss_256(struct pci_dev *dev)
3425 dev->pcie_mpss = 1; /* 256 bytes */
3439 * until all of the devices are discovered and buses walked, read completion
3443 static void quirk_intel_mc_errata(struct pci_dev *dev)
3457 err = pci_read_config_word(dev, 0x48, &rcc);
3459 pci_err(dev, "Error attempting to read the read completion coalescing register\n");
3468 err = pci_write_config_word(dev, 0x48, rcc);
3470 pci_err(dev, "Error attempting to write the read completion coalescing register\n");
3509 static void quirk_intel_ntb(struct pci_dev *dev)
3514 rc = pci_read_config_byte(dev, 0x00D0, &val);
3518 dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
3520 rc = pci_read_config_byte(dev, 0x00D1, &val);
3524 dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
3542 static void disable_igfx_irq(struct pci_dev *dev)
3544 void __iomem *regs = pci_iomap(dev, 0, 0);
3546 pci_warn(dev, "igfx quirk: Can't iomap PCI device\n");
3552 pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
3557 pci_iounmap(dev, regs);
3571 static void quirk_remove_d3hot_delay(struct pci_dev *dev)
3573 dev->d3hot_delay = 0;
3607 static void quirk_broken_intx_masking(struct pci_dev *dev)
3609 dev->broken_intx_masking = 1;
3630 * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
3738 static void quirk_no_bus_reset(struct pci_dev *dev)
3740 dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
3747 static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
3749 if ((dev->device & 0xffc0) == 0x2340)
3750 quirk_no_bus_reset(dev);
3786 static void quirk_no_pm_reset(struct pci_dev *dev)
3792 if (!pci_is_root_bus(dev->bus))
3793 dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
3859 static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
3865 if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
3877 bridge = ACPI_HANDLE(&dev->dev);
3892 pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");
3912 static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, bool probe)
3923 pcie_flr(dev);
3934 static int reset_ivb_igd(struct pci_dev *dev, bool probe)
3943 mmio_base = pci_iomap(dev, 0, 0);
3967 pci_warn(dev, "timeout during reset\n");
3972 pci_iounmap(dev, mmio_base);
3977 static int reset_chelsio_generic_dev(struct pci_dev *dev, bool probe)
3986 if ((dev->device & 0xf000) != 0x4000)
4002 pci_read_config_word(dev, PCI_COMMAND, &old_command);
4003 pci_write_config_word(dev, PCI_COMMAND,
4010 pci_save_state(dev);
4019 pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
4021 pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
4026 pcie_flr(dev);
4033 pci_restore_state(dev);
4034 pci_write_config_word(dev, PCI_COMMAND, old_command);
4055 static int nvme_disable_and_flr(struct pci_dev *dev, bool probe)
4061 if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
4062 pcie_reset_flr(dev, PCI_RESET_PROBE) || !pci_resource_start(dev, 0))
4068 bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
4072 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4073 pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
4110 pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
4116 pci_iounmap(dev, bar);
4118 pcie_flr(dev);
4130 static int delay_250ms_after_flr(struct pci_dev *dev, bool probe)
4133 return pcie_reset_flr(dev, PCI_RESET_PROBE);
4135 pcie_reset_flr(dev, PCI_RESET_DO_RESET);
4197 pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
4228 int pci_dev_specific_reset(struct pci_dev *dev, bool probe)
4233 if ((i->vendor == dev->vendor ||
4235 (i->device == dev->device ||
4237 return i->reset(dev, probe);
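pci_dev_specific_reset() above matches the device against a static table of per-device reset methods. A sketch of what an entry and its callback look like; the IDs are placeholders and the callback body is hypothetical, shown only to illustrate the probe/do-reset contract:

/* Sketch of the lookup table consulted by pci_dev_specific_reset(). */
struct pci_dev_reset_methods {
	u16 vendor;
	u16 device;
	int (*reset)(struct pci_dev *dev, bool probe);
};

static int example_dev_reset(struct pci_dev *dev, bool probe)
{
	if (probe)
		return 0;		/* reset method is available */

	return pcie_flr(dev);		/* hypothetical: a plain FLR is enough here */
}

static const struct pci_dev_reset_methods pci_dev_reset_methods_sketch[] = {
	{ 0xdead /* vendor, placeholder */, 0xbeef /* device, placeholder */,
	  example_dev_reset },
	{ 0 }
};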
4243 static void quirk_dma_func0_alias(struct pci_dev *dev)
4245 if (PCI_FUNC(dev->devfn) != 0)
4246 pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
4261 static void quirk_dma_func1_alias(struct pci_dev *dev)
4263 if (PCI_FUNC(dev->devfn) != 1)
4264 pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
4351 static void quirk_fixed_dma_alias(struct pci_dev *dev)
4355 id = pci_match_id(fixed_dma_alias_tbl, dev);
4357 pci_add_dma_alias(dev, id->driver_data, 1);
4475 static void quirk_relaxedordering_disable(struct pci_dev *dev)
4477 dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
4478 pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
4592 dev_name(&pdev->dev));
4623 * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
4624 * in @acs_ctrl_ena, i.e., the device provides all the access controls the
4640 * redirect (CR) since all transactions are redirected to the upstream
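The helper documented at lines 4623-4624, pci_acs_ctrl_enabled(), reduces to a simple mask check; a minimal sketch consistent with that description:

/*
 * Return 1 if every ACS control requested in @acs_ctrl_req is also set
 * in @acs_ctrl_ena, 0 otherwise.
 */
static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
{
	if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
		return 1;

	return 0;
}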
4659 static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
4666 if (!dev->multifunction || !pci_is_root_bus(dev->bus))
4685 static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
4687 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4690 switch (dev->device) {
4692 * Effectively selects all downstream ports for whole ThunderX1
4704 static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
4706 if (!pci_quirk_cavium_acs_match(dev))
4721 static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
4737 static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
4739 if (!pci_is_pcie(dev) ||
4740 ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
4741 (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
4748 switch (dev->device) {
4793 static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
4798 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4802 if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
4808 static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
4810 if (!pci_quirk_intel_pch_acs_match(dev))
4813 if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
4830 static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4842 static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
4848 static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
4850 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4911 static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4913 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4916 switch (dev->device) {
4928 static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
4933 if (!pci_quirk_intel_spt_pch_acs_match(dev))
4936 pos = dev->acs_cap;
4941 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
4944 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
4949 static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
4965 static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
4972 if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
4979 static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
4994 * directing all peer-to-peer traffic upstream as though PCI_ACS_RR and
5000 static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
5002 switch (dev->device) {
5017 int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
5172 * @dev: PCI device
5178 * 0: Device does not provide all the desired controls
5179 * >0: Device provides all the controls in @acs_flags
5181 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
5193 if ((i->vendor == dev->vendor ||
5195 (i->device == dev->device ||
5197 ret = i->acs_enabled(dev, acs_flags);
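pci_dev_specific_acs_enabled() above walks a pci_dev_acs_enabled[] table whose acs_enabled member appears at line 5017. An entry is just a vendor/device pair plus one of the pci_quirk_*_acs() callbacks listed earlier; a sketch with placeholder IDs:

/* Sketch of the table walked by pci_dev_specific_acs_enabled(). */
static const struct pci_dev_acs_enabled {
	u16 vendor;
	u16 device;
	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled_sketch[] = {
	/* Placeholder IDs; real entries pair known devices with callbacks
	 * such as pci_quirk_mf_endpoint_acs() shown above. */
	{ 0xdead, 0xbeef, pci_quirk_mf_endpoint_acs },
	{ 0 }
};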
5225 static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
5235 pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
5257 pci_info(dev, "Disabling UPDCR peer decodes\n");
5272 static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
5282 pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
5284 pci_info(dev, "Enabling MPC IRBNCE\n");
5286 pci_write_config_word(dev, INTEL_MPC_REG, mpc);
5295 * if dev->external_facing || dev->untrusted
5297 static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
5299 if (!pci_quirk_intel_pch_acs_match(dev))
5302 if (pci_quirk_enable_intel_lpc_acs(dev)) {
5303 pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n");
5307 pci_quirk_enable_intel_rp_mpc_acs(dev);
5309 dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
5311 pci_info(dev, "Intel PCH root port ACS workaround enabled\n");
5316 static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
5321 if (!pci_quirk_intel_spt_pch_acs_match(dev))
5324 pos = dev->acs_cap;
5328 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
5329 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
5336 if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
5339 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
5341 pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");
5346 static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
5351 if (!pci_quirk_intel_spt_pch_acs_match(dev))
5354 pos = dev->acs_cap;
5358 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
5359 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
5363 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
5365 pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");
5373 int (*enable_acs)(struct pci_dev *dev);
5374 int (*disable_acs_redir)(struct pci_dev *dev);
5385 int pci_dev_specific_enable_acs(struct pci_dev *dev)
5392 if ((p->vendor == dev->vendor ||
5394 (p->device == dev->device ||
5397 ret = p->enable_acs(dev);
5406 int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
5413 if ((p->vendor == dev->vendor ||
5415 (p->device == dev->device ||
5418 ret = p->disable_acs_redir(dev);
5520 static void quirk_no_flr(struct pci_dev *dev)
5522 dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
5532 static void quirk_no_flr_snet(struct pci_dev *dev)
5534 if (dev->revision == 0x1)
5535 quirk_no_flr(dev);
5660 if (device_link_add(&pdev->dev, &supplier_pdev->dev,
5668 pm_runtime_allow(&pdev->dev);
5792 /* Write Vendor ID (read-only) so the endpoint latches its bus/dev */
5807 * ports. Therefore, all proxy IDs must be aliased to the NTB device
5984 * side of the NTB. Alias all possible IDs to the NTB to permit access when
5990 /* PLX NTB may use all 256 devfns */
6006 * clean state and fixes all these issues.
6058 static void pci_fixup_no_d0_pme(struct pci_dev *dev)
6060 pci_info(dev, "PME# does not work under D0, disabling it\n");
6061 dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
6068 * These devices advertise PME# support in all power states but don't
6075 static void pci_fixup_no_msi_no_pme(struct pci_dev *dev)
6078 pci_info(dev, "MSI is not implemented on this device, disabling it\n");
6079 dev->no_msi = 1;
6081 pci_info(dev, "PME# is unreliable, disabling it\n");
6082 dev->pme_support = 0;
6155 static void rom_bar_overlap_defect(struct pci_dev *dev)
6157 pci_info(dev, "working around ROM BAR overlap defect\n");
6158 dev->rom_bar_overlap = 1;
6173 static void aspm_l1_acceptable_latency(struct pci_dev *dev)
6175 u32 l1_lat = FIELD_GET(PCI_EXP_DEVCAP_L1, dev->devcap);
6178 dev->devcap |= FIELD_PREP(PCI_EXP_DEVCAP_L1, 7);
6179 pci_info(dev, "ASPM: overriding L1 acceptable latency from %#x to 0x7\n",
6217 static void dpc_log_size(struct pci_dev *dev)
6221 dpc = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
6225 pci_read_config_word(dev, dpc + PCI_EXP_DPC_CAP, &val);
6230 pci_info(dev, "Overriding RP PIO Log Size to 4\n");
6231 dev->dpc_rp_log_size = 4;
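dpc_log_size() above covers root ports that advertise DPC RP Extensions but report an RP PIO Log Size of zero. A sketch of that check under the assumption that the standard DPC register definitions from <uapi/linux/pci_regs.h> apply and CONFIG_PCIE_DPC is enabled (so dev->dpc_rp_log_size exists):

#include <linux/bitfield.h>
#include <linux/pci.h>

/* Sketch: force a sane RP PIO log size when the capability reports zero. */
static void dpc_log_size_sketch(struct pci_dev *dev)
{
	u16 dpc, val;

	dpc = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
	if (!dpc)
		return;

	pci_read_config_word(dev, dpc + PCI_EXP_DPC_CAP, &val);
	if (!(val & PCI_EXP_DPC_CAP_RP_EXT))
		return;

	if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
		pci_info(dev, "Overriding RP PIO Log Size to 4\n");
		dev->dpc_rp_log_size = 4;	/* log size in DWORDs */
	}
}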
6257 * To overlay the flattened device tree, the PCI device and all its ancestor