// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/irq.h>

#include <asm/hpet.h>
#include <asm/setup.h>

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void quirk_intel_irqbalance(struct pci_dev *dev)
{
	u8 config;
	u16 word;

	/*
	 * BIOS may enable hardware IRQ balancing for
	 * E7520/E7320/E7525 (revision ID 0x9 and below)
	 * based platforms.
	 * Disable SW irqbalance/affinity on those platforms.
	 */
	if (dev->revision > 0x9)
		return;

	/* enable access to config space */
	pci_read_config_byte(dev, 0xf4, &config);
	pci_write_config_byte(dev, 0xf4, config | 0x2);

	/*
	 * read xTPR register.  We may not have a pci_dev for device 8
	 * because it might be hidden until the above write.
	 */
	pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

	if (!(word & (1 << 13))) {
		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; disabling irq balancing and affinity\n");
		noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
		no_irq_affinity = 1;
#endif
	}

	/* put back the original value for config space */
	if (!(config & 0x2))
		pci_write_config_byte(dev, 0xf4, config);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
			quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
unsigned long force_hpet_address;

static enum {
	NONE_FORCE_HPET_RESUME,
	OLD_ICH_FORCE_HPET_RESUME,
	ICH_FORCE_HPET_RESUME,
	VT8237_FORCE_HPET_RESUME,
	NVIDIA_FORCE_HPET_RESUME,
	ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

static void __iomem *rcba_base;

static void ich_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address)
		return;

	BUG_ON(rcba_base == NULL);

	/* read the Function Disable register, dword mode only */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		/* HPET disabled in HPTC. Try to enable it */
		writel(val | 0x80, rcba_base + 0x3404);
	}

	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80))
		BUG();
	else
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 rcba;
	int err = 0;

	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xF0, &rcba);
	rcba &= 0xFFFFC000;
	if (rcba == 0) {
		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; cannot force enable HPET\n");
		return;
	}

	/* use bits 31:14, 16 kB aligned */
	rcba_base = ioremap_nocache(rcba, 0x4000);
	if (rcba_base == NULL) {
		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; cannot force enable HPET\n");
		return;
	}

	/* read the Function Disable register, dword mode only */
	val = readl(rcba_base + 0x3404);

	if (val & 0x80) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
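		/*
		 * HPTC bits 1:0 select one of four 4 kB-aligned HPET
		 * addresses (0xFED00000/0xFED01000/0xFED02000/0xFED03000);
		 * recover the address the BIOS picked.
		 */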
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
			   force_hpet_address);
		iounmap(rcba_base);
		return;
	}

	/* HPET disabled in HPTC. Try to enable it */
	writel(val | 0x80, rcba_base + 0x3404);

	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		err = 1;
	} else {
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
	}

	if (err) {
		force_hpet_address = 0;
		iounmap(rcba_base);
		dev_printk(KERN_DEBUG, &dev->dev,
			   "Failed to force enable HPET\n");
	} else {
		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
			   force_hpet_address);
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
			 ich_force_enable_hpet);

static struct pci_dev *cached_dev;

static void hpet_print_force_info(void)
{
	printk(KERN_INFO "HPET not enabled in BIOS. "
	       "You might try hpet=force boot option\n");
}

static void old_ich_force_hpet_resume(void)
{
	u32 val;
	u32 gen_cntl;

	if (!force_hpet_address || !cached_dev)
		return;

	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	gen_cntl &= ~(0x7 << 15);
	gen_cntl |= (0x4 << 15);

	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val == 0x4)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}

static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 gen_cntl;

	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xD0, &gen_cntl);
	/*
	 * Bit 17 is the HPET enable bit.
	 * Bits 16:15 control the HPET base address.
	 */
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			   force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Try enabling it at 0xFED00000 and check
	 * whether it sticks.
	 */
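	/* Clear the address-select bits (16:15) and set the enable bit (17) */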
	gen_cntl &= ~(0x7 << 15);
	gen_cntl |= (0x4 << 15);
	pci_write_config_dword(dev, 0xD0, gen_cntl);

	pci_read_config_dword(dev, 0xD0, &gen_cntl);

	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* HPET is enabled in GEN_CNTL. Just not reported by BIOS */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
			   force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset features. Only force-enable the HPET here if
 * the user explicitly asked for it (hpet=force).
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
	if (hpet_force_user)
		old_ich_force_enable_hpet(dev);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
			 old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
			 old_ich_force_enable_hpet);

static void vt8237_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address || !cached_dev)
		return;

	val = 0xfed00000 | 0x80;
	pci_write_config_dword(cached_dev, 0x68, val);

	pci_read_config_dword(cached_dev, 0x68, &val);
	if (val & 0x80)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	pci_read_config_dword(dev, 0x68, &val);
	/*
	 * Bit 7 is the HPET enable bit.
	 * Bits 31:10 hold the HPET base address
	 * (contrary to what the datasheet claims).
	 */
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			   force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Try enabling it at 0xFED00000 and check
	 * whether it sticks.
	 */
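	/* Bits 31:10 = base address 0xFED00000, bit 7 = enable */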
	val = 0xfed00000 | 0x80;
	pci_write_config_dword(dev, 0x68, val);

	pci_read_config_dword(dev, 0x68, &val);
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
			   force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
			 vt8237_force_enable_hpet);

static void ati_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
	int err = 0;
	u32 d = 0;
	u8 b = 0;

	err = pci_read_config_byte(dev, 0xac, &b);
	b &= ~(1 << 5);
	err |= pci_write_config_byte(dev, 0xac, b);
	err |= pci_read_config_dword(dev, 0x70, &d);
	d |= 1 << 8;
	err |= pci_write_config_dword(dev, 0x70, d);
	err |= pci_read_config_dword(dev, 0x8, &d);
	d &= 0xff;
	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);

	WARN_ON_ONCE(err);

	return d;
}

static void ati_force_enable_hpet(struct pci_dev *dev)
{
	u32 d, val;
	u8 b;

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	d = ati_ixp4x0_rev(dev);
	if (d < 0x82)
		return;

	/* base address */
	pci_write_config_dword(dev, 0x14, 0xfed00000);
	pci_read_config_dword(dev, 0x14, &val);

	/* enable interrupt */
	outb(0x72, 0xcd6); b = inb(0xcd7);
	b |= 0x1;
	outb(0x72, 0xcd6); outb(b, 0xcd7);
	outb(0x72, 0xcd6); b = inb(0xcd7);
	if (!(b & 0x1))
		return;
	pci_read_config_dword(dev, 0x64, &d);
	d |= (1 << 10);
	pci_write_config_dword(dev, 0x64, d);
	pci_read_config_dword(dev, 0x64, &d);
	if (!(d & (1 << 10)))
		return;

	force_hpet_address = val;
	force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
	cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
			 ati_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
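/*
 * On these nForce bridges, config register 0x44 appears to hold the
 * HPET base address with bit 0 acting as the enable flag: writing
 * 0xfed00001 maps the HPET at 0xFED00000 and enables it, and the
 * readback masks off bit 0 to recover the address.
 */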
static void nvidia_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	pci_write_config_dword(dev, 0x44, 0xfed00001);
	pci_read_config_dword(dev, 0x44, &val);
	force_hpet_address = val & 0xfffffffe;
	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
	cached_dev = dev;
}

/* ISA bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
			 nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			 nvidia_force_enable_hpet);

void force_hpet_resume(void)
{
	switch (force_hpet_resume_type) {
	case ICH_FORCE_HPET_RESUME:
		ich_force_hpet_resume();
		return;
	case OLD_ICH_FORCE_HPET_RESUME:
		old_ich_force_hpet_resume();
		return;
	case VT8237_FORCE_HPET_RESUME:
		vt8237_force_hpet_resume();
		return;
	case NVIDIA_FORCE_HPET_RESUME:
		nvidia_force_hpet_resume();
		return;
	case ATI_FORCE_HPET_RESUME:
		ati_force_hpet_resume();
		return;
	default:
		break;
	}
}

/*
 * According to the datasheet, E6xx systems have the HPET hardwired to
 * 0xFED00000.
 */
static void e6xx_force_enable_hpet(struct pci_dev *dev)
{
	if (hpet_address || force_hpet_address)
		return;

	force_hpet_address = 0xFED00000;
	force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
			 e6xx_force_enable_hpet);

/*
 * HPET MSI on some boards (ATI SB700/SB800) has a side effect on
 * floppy DMA. Disable HPET MSI on such platforms.
 * See erratum #27 (Misinterpreted MSI Requests May Result in
 * Corrupted LPC DMA Data) in AMD Publication #46837,
 * "SB700 Family Product Errata", Rev. 1.0, March 2010.
 */
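/*
 * Setting hpet_msi_disable makes the HPET driver skip MSI (FSB
 * interrupt) delivery for its comparators and fall back to legacy
 * IRQ routing, which avoids the erratum above.
 */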
static void force_disable_hpet_msi(struct pci_dev *unused)
{
	hpet_msi_disable = true;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			 force_disable_hpet_msi);

#endif

#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
/* Set correct numa_node information for AMD NB functions */
static void quirk_amd_nb_node(struct pci_dev *dev)
{
	struct pci_dev *nb_ht;
	unsigned int devfn;
	u32 node;
	u32 val;

	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
	nb_ht = pci_get_slot(dev->bus, devfn);
	if (!nb_ht)
		return;

	pci_read_config_dword(nb_ht, 0x60, &val);
	node = pcibus_to_node(dev->bus) | (val & 7);
	/*
	 * Some hardware may return an invalid node ID,
	 * so check it first:
	 */
	if (node_online(node))
		set_dev_node(&dev->dev, node);
	pci_dev_put(nb_ht);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
			quirk_amd_nb_node);
#endif

#ifdef CONFIG_PCI
/*
 * The processor does not ensure that the DRAM scrub read/write sequence
 * is atomic with respect to accesses to the CC6 save state area.
 * Therefore, if a concurrent scrub read/write access hits the same
 * address, the entry may appear as if it was not written. This quirk
 * applies to Fam16h models 00h-0Fh.
 *
 * See "Revision Guide" for AMD F16h models 00h-0fh,
 * document 51810 rev. 3.04, Nov 2013.
 */
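/*
 * Per AMD's register documentation, D18F3x58[4:0] is the sequential
 * DRAM scrub rate (00h disables sequential scrubbing) and D18F3x5C[0]
 * enables scrub redirection; the workaround clears both.
 */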
static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
{
	u32 val;

	/*
	 * Suggested workaround:
	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
	 */
	pci_read_config_dword(dev, 0x58, &val);
	if (val & 0x1F) {
		val &= ~(0x1F);
		pci_write_config_dword(dev, 0x58, val);
	}

	pci_read_config_dword(dev, 0x5C, &val);
	if (val & BIT(0)) {
		val &= ~BIT(0);
		pci_write_config_dword(dev, 0x5C, val);
	}
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
			amd_disable_seq_and_redirect_scrub);

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
#include <linux/jump_label.h>
#include <asm/string_64.h>

/* Ivy Bridge, Haswell, Broadwell */
static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
{
	u32 capid0;

	pci_read_config_dword(pdev, 0x84, &capid0);

	if (capid0 & 0x10)
		static_branch_inc(&mcsafe_key);
}

/* Skylake */
static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
{
	u32 capid0, capid5;

	pci_read_config_dword(pdev, 0x84, &capid0);
	pci_read_config_dword(pdev, 0x98, &capid5);

	/*
	 * CAPID0{7:6} indicate whether this is an advanced RAS SKU.
	 * CAPID5{8:5} indicate that various NVDIMM usage modes are
	 * enabled, so memory machine check recovery is also enabled.
	 */
	if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
		static_branch_inc(&mcsafe_key);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif
#endif

bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);

void __init early_platform_quirks(void)
{
	x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
			    dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
}