/*
 * Various workarounds for chipset bugs.
 *
 * This code runs very early and can't use the regular PCI subsystem.
 * The entries are keyed to PCI bridges which usually identify chipsets
 * uniquely.
 * This is only for whole classes of chipsets with specific problems which
 * need early invasive action (e.g. before the timers are initialized).
 * Most PCI device specific workarounds can be done later and should be
 * in standard PCI quirks.
 * Mainboard specific bugs should be handled by DMI entries.
 * CPU specific bugs are handled in setup.c.
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci_ids.h>
#include <drm/i915_drm.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/irq_remapping.h>

static void __init fix_hypertransport_config(int num, int slot, int func)
{
	u32 htcfg;
	/*
	 * We found a HyperTransport bus. Make sure that we are
	 * broadcasting interrupts to all CPUs on the HT bus if
	 * we're using extended APIC IDs.
	 */
	htcfg = read_pci_config(num, slot, func, 0x68);
	if (htcfg & (1 << 18)) {
		printk(KERN_INFO "Detected use of extended apic ids "
				 "on hypertransport bus\n");
		if ((htcfg & (1 << 17)) == 0) {
			printk(KERN_INFO "Enabling hypertransport extended "
					 "apic interrupt broadcast\n");
			printk(KERN_INFO "Note this is a bios bug, "
					 "please contact your hw vendor\n");
			htcfg |= (1 << 17);
			write_pci_config(num, slot, func, 0x68, htcfg);
		}
	}
}

static void __init via_bugs(int num, int slot, int func)
{
#ifdef CONFIG_GART_IOMMU
	if ((max_pfn > MAX_DMA32_PFN || force_iommu) &&
	    !gart_iommu_aperture_allowed) {
		printk(KERN_INFO
		       "Looks like a VIA chipset. Disabling IOMMU."
		       " Override with iommu=allowed\n");
		gart_iommu_aperture_disabled = 1;
	}
#endif
}

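/*
 * nvidia_hpet_check() is only a dummy handler: nvidia_bugs() just needs
 * to know whether an ACPI HPET table exists at all.  acpi_table_parse()
 * returns 0 when the table was found (and the handler ran), and non-zero
 * otherwise.
 */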
#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC

static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
	return 0;
}
#endif /* CONFIG_X86_IO_APIC */
#endif /* CONFIG_ACPI */

static void __init nvidia_bugs(int num, int slot, int func)
{
#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC
	/*
	 * All timer overrides on Nvidia are
	 * wrong unless HPET is enabled.
	 * Unfortunately that's not true on many Asus boards.
	 * We don't know yet how to detect this automatically, but
	 * at least allow a command line override.
	 */
	if (acpi_use_timer_override)
		return;

	if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
		acpi_skip_timer_override = 1;
		printk(KERN_INFO "Nvidia board "
		       "detected. Ignoring ACPI "
		       "timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
#endif
#endif
	/* RED-PEN skip them on mptables too? */
}

#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
{
	u32 d;
	u8 b;

	b = read_pci_config_byte(num, slot, func, 0xac);
	b &= ~(1<<5);
	write_pci_config_byte(num, slot, func, 0xac, b);

	d = read_pci_config(num, slot, func, 0x70);
	d |= 1<<8;
	write_pci_config(num, slot, func, 0x70, d);

	/* The revision ID lives in the low byte of config offset 0x08 */
	d = read_pci_config(num, slot, func, 0x8);
	d &= 0xff;
	return d;
}

static void __init ati_bugs(int num, int slot, int func)
{
	u32 d;
	u8 b;

	if (acpi_use_timer_override)
		return;

	d = ati_ixp4x0_rev(num, slot, func);
	if (d < 0x82) {
		acpi_skip_timer_override = 1;
	} else {
		/* check for IRQ0 interrupt swap */
		outb(0x72, 0xcd6); b = inb(0xcd7);
		if (!(b & 0x2))
			acpi_skip_timer_override = 1;
	}

	if (acpi_skip_timer_override) {
		printk(KERN_INFO "SB4X0 revision 0x%x\n", d);
		printk(KERN_INFO "Ignoring ACPI timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
}

static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
	u32 d;

	d = read_pci_config(num, slot, func, 0x8);
	d &= 0xff;

	return d;
}

static void __init ati_bugs_contd(int num, int slot, int func)
{
	u32 d, rev;

	rev = ati_sbx00_rev(num, slot, func);
	if (rev >= 0x40)
		acpi_fix_pin2_polarity = 1;

	/*
	 * SB600: revisions 0x11, 0x12, 0x13, 0x14, ...
	 * SB700: revisions 0x39, 0x3a, ...
	 * SB800: revisions 0x40, 0x41, ...
	 */
	if (rev >= 0x39)
		return;

	if (acpi_use_timer_override)
		return;

	/* check for IRQ0 interrupt swap */
	d = read_pci_config(num, slot, func, 0x64);
	if (!(d & (1<<14)))
		acpi_skip_timer_override = 1;

	if (acpi_skip_timer_override) {
		printk(KERN_INFO "SB600 revision 0x%x\n", rev);
		printk(KERN_INFO "Ignoring ACPI timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
}
#else
static void __init ati_bugs(int num, int slot, int func)
{
}

static void __init ati_bugs_contd(int num, int slot, int func)
{
}
#endif

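/*
 * Some steppings of the Intel I/O hub host bridges matched in early_qrk[]
 * below cannot reliably drain in-flight interrupts while interrupt
 * remapping is enabled, so flag remapping as broken for them.
 */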
static void __init intel_remapping_check(int num, int slot, int func)
{
	u8 revision;
	u16 device;

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
	revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);

	/*
	 * Revision <= 0x13 of all triggering device ids in this quirk
	 * have a problem draining interrupts when irq remapping is
	 * enabled, and should be flagged as broken. Additionally
	 * revision 0x22 of device id 0x3405 has this problem.
	 */
	if (revision <= 0x13)
		set_irq_remapping_broken();
	else if (device == 0x3405 && revision == 0x22)
		set_irq_remapping_broken();
}

/*
 * Systems with Intel graphics controllers set aside memory exclusively
 * for gfx driver use. This memory is not marked in the E820 as reserved
 * or as RAM, and so is subject to overlap from E820 manipulation later
 * in the boot process. On some systems, MMIO space is allocated on top,
 * despite the efforts of the "RAM buffer" approach, which simply rounds
 * memory boundaries up to 64M to try to catch space that may decode
 * as RAM and so is not suitable for MMIO.
 *
 * And yes, so far on current devices the base addr is always under 4G.
 */
static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	u32 base;

	/*
	 * For the PCI IDs in this quirk, the stolen base is always
	 * in 0x5c, aka the BDSM register (yes that's really what
	 * it's called).
	 */
	base = read_pci_config(num, slot, func, 0x5c);
	base &= ~((1<<20) - 1);

	return base;
}

#define KB(x)	((x) * 1024UL)
#define MB(x)	(KB (KB (x)))
#define GB(x)	(MB (KB (x)))

static size_t __init i830_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
		return 0;

	if (tmp & I830_TSEG_SIZE_1M)
		return MB(1);
	else
		return KB(512);
}

static size_t __init i845_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
		return 0;

	switch (tmp & I845_TSEG_SIZE_MASK) {
	case I845_TSEG_SIZE_512K:
		return KB(512);
	case I845_TSEG_SIZE_1M:
		return MB(1);
	default:
		WARN_ON(1);
		return 0;
	}
}

static size_t __init i85x_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
		return 0;

	return MB(1);
}

static size_t __init i830_mem_size(void)
{
	return read_pci_config_byte(0, 0, 0, I830_DRB3) * MB(32);
}

static size_t __init i85x_mem_size(void)
{
	return read_pci_config_byte(0, 0, 1, I85X_DRB3) * MB(32);
}

/*
 * On 830/845/85x the stolen memory base isn't available in any
 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
 */
static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	return i830_mem_size() - i830_tseg_size() - stolen_size;
}

static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	return i830_mem_size() - i845_tseg_size() - stolen_size;
}

static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	return i85x_mem_size() - i85x_tseg_size() - stolen_size;
}

static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	/*
	 * FIXME is the graphics stolen memory region
	 * always at TOUD? Ie. is it always the last
	 * one to be allocated by the BIOS?
	 */
	return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
}

static size_t __init i830_stolen_size(int num, int slot, int func)
{
	size_t stolen_size;
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);

	switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
	case I830_GMCH_GMS_STOLEN_512:
		stolen_size = KB(512);
		break;
	case I830_GMCH_GMS_STOLEN_1024:
		stolen_size = MB(1);
		break;
	case I830_GMCH_GMS_STOLEN_8192:
		stolen_size = MB(8);
		break;
	case I830_GMCH_GMS_LOCAL:
		/* local memory isn't part of the normal address space */
		stolen_size = 0;
		break;
	default:
		return 0;
	}

	return stolen_size;
}

static size_t __init gen3_stolen_size(int num, int slot, int func)
{
	size_t stolen_size;
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);

	switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
	case I855_GMCH_GMS_STOLEN_1M:
		stolen_size = MB(1);
		break;
	case I855_GMCH_GMS_STOLEN_4M:
		stolen_size = MB(4);
		break;
	case I855_GMCH_GMS_STOLEN_8M:
		stolen_size = MB(8);
		break;
	case I855_GMCH_GMS_STOLEN_16M:
		stolen_size = MB(16);
		break;
	case I855_GMCH_GMS_STOLEN_32M:
		stolen_size = MB(32);
		break;
	case I915_GMCH_GMS_STOLEN_48M:
		stolen_size = MB(48);
		break;
	case I915_GMCH_GMS_STOLEN_64M:
		stolen_size = MB(64);
		break;
	case G33_GMCH_GMS_STOLEN_128M:
		stolen_size = MB(128);
		break;
	case G33_GMCH_GMS_STOLEN_256M:
		stolen_size = MB(256);
		break;
	case INTEL_GMCH_GMS_STOLEN_96M:
		stolen_size = MB(96);
		break;
	case INTEL_GMCH_GMS_STOLEN_160M:
		stolen_size = MB(160);
		break;
	case INTEL_GMCH_GMS_STOLEN_224M:
		stolen_size = MB(224);
		break;
	case INTEL_GMCH_GMS_STOLEN_352M:
		stolen_size = MB(352);
		break;
	default:
		stolen_size = 0;
		break;
	}

	return stolen_size;
}

static size_t __init gen6_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	return gmch_ctrl << 25; /* 32 MB units */
}

static size_t __init gen8_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
	gmch_ctrl &= BDW_GMCH_GMS_MASK;
	return gmch_ctrl << 25; /* 32 MB units */
}

struct intel_stolen_funcs {
	size_t (*size)(int num, int slot, int func);
	u32 (*base)(int num, int slot, int func, size_t size);
};

static const struct intel_stolen_funcs i830_stolen_funcs = {
	.base = i830_stolen_base,
	.size = i830_stolen_size,
};

static const struct intel_stolen_funcs i845_stolen_funcs = {
	.base = i845_stolen_base,
	.size = i830_stolen_size,
};

static const struct intel_stolen_funcs i85x_stolen_funcs = {
	.base = i85x_stolen_base,
	.size = gen3_stolen_size,
};

static const struct intel_stolen_funcs i865_stolen_funcs = {
	.base = i865_stolen_base,
	.size = gen3_stolen_size,
};

static const struct intel_stolen_funcs gen3_stolen_funcs = {
	.base = intel_stolen_base,
	.size = gen3_stolen_size,
};

static const struct intel_stolen_funcs gen6_stolen_funcs = {
	.base = intel_stolen_base,
	.size = gen6_stolen_size,
};

static const struct intel_stolen_funcs gen8_stolen_funcs = {
	.base = intel_stolen_base,
	.size = gen8_stolen_size,
};

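/*
 * The driver_data of each entry below points at the intel_stolen_funcs
 * for that GMCH generation; intel_graphics_stolen() casts it back to
 * look up the base and size helpers for the detected device.
 */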
static struct pci_device_id intel_stolen_ids[] __initdata = {
	INTEL_I830_IDS(&i830_stolen_funcs),
	INTEL_I845G_IDS(&i845_stolen_funcs),
	INTEL_I85X_IDS(&i85x_stolen_funcs),
	INTEL_I865G_IDS(&i865_stolen_funcs),
	INTEL_I915G_IDS(&gen3_stolen_funcs),
	INTEL_I915GM_IDS(&gen3_stolen_funcs),
	INTEL_I945G_IDS(&gen3_stolen_funcs),
	INTEL_I945GM_IDS(&gen3_stolen_funcs),
	INTEL_VLV_M_IDS(&gen6_stolen_funcs),
	INTEL_VLV_D_IDS(&gen6_stolen_funcs),
	INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
	INTEL_I965G_IDS(&gen3_stolen_funcs),
	INTEL_G33_IDS(&gen3_stolen_funcs),
	INTEL_I965GM_IDS(&gen3_stolen_funcs),
	INTEL_GM45_IDS(&gen3_stolen_funcs),
	INTEL_G45_IDS(&gen3_stolen_funcs),
	INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
	INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
	INTEL_SNB_D_IDS(&gen6_stolen_funcs),
	INTEL_SNB_M_IDS(&gen6_stolen_funcs),
	INTEL_IVB_M_IDS(&gen6_stolen_funcs),
	INTEL_IVB_D_IDS(&gen6_stolen_funcs),
	INTEL_HSW_D_IDS(&gen6_stolen_funcs),
	INTEL_HSW_M_IDS(&gen6_stolen_funcs),
	INTEL_BDW_M_IDS(&gen8_stolen_funcs),
	INTEL_BDW_D_IDS(&gen8_stolen_funcs)
};

static void __init intel_graphics_stolen(int num, int slot, int func)
{
	size_t size;
	int i;
	u32 start;
	u16 device, subvendor, subdevice;

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
	subvendor = read_pci_config_16(num, slot, func,
				       PCI_SUBSYSTEM_VENDOR_ID);
	subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);

	for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
		if (intel_stolen_ids[i].device == device) {
			const struct intel_stolen_funcs *stolen_funcs =
				(const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
			size = stolen_funcs->size(num, slot, func);
			start = stolen_funcs->base(num, slot, func, size);
			if (size && start) {
				printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
				       start, start + (u32)size - 1);
				/* Mark this space as reserved */
				e820_add_region(start, size, E820_RESERVED);
				sanitize_e820_map(e820.map,
						  ARRAY_SIZE(e820.map),
						  &e820.nr_map);
			}
			return;
		}
	}
}

static void __init force_disable_hpet(int num, int slot, int func)
{
#ifdef CONFIG_HPET_TIMER
	boot_hpet_disable = 1;
	pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
#endif
}

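/*
 * Quirks marked QFLAG_APPLY_ONCE run for the first matching device only:
 * check_dev_quirk() skips a handler once both QFLAG_APPLY_ONCE and
 * QFLAG_APPLIED are set (i.e. QFLAG_DONE); all other quirks run for
 * every matching device.
 */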
#define QFLAG_APPLY_ONCE	0x1
#define QFLAG_APPLIED		0x2
#define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
struct chipset {
	u32 vendor;
	u32 device;
	u32 class;
	u32 class_mask;
	u32 flags;
	void (*f)(int num, int slot, int func);
};

/*
 * Only works for devices on the root bus. If you add any devices
 * not on bus 0 readd another loop level in early_quirks(). But
 * be careful because at least the Nvidia quirk here relies on
 * only matching on bus 0.
 */
static struct chipset early_qrk[] __initdata = {
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
	{ PCI_VENDOR_ID_VIA, PCI_ANY_ID,
	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs },
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config },
	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
	{ PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
	{ PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
	{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
	/*
	 * HPET on the current version of the Baytrail platform has accuracy
	 * problems: disable it for now.
	 */
	{ PCI_VENDOR_ID_INTEL, 0x0f00,
	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet },
	{}
};

/**
 * check_dev_quirk - apply early quirks to a given PCI device
 * @num: bus number
 * @slot: slot number
 * @func: PCI function
 *
 * Check the vendor & device ID against the early quirks table.
 *
 * If the device is single function, let early_quirks() know so we don't
 * poke at this device again.
 */
static int __init check_dev_quirk(int num, int slot, int func)
{
	u16 class;
	u16 vendor;
	u16 device;
	u8 type;
	int i;

	class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);

	if (class == 0xffff)
		return -1; /* no class, treat as single function */

	vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID);

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);

	for (i = 0; early_qrk[i].f != NULL; i++) {
		if (((early_qrk[i].vendor == PCI_ANY_ID) ||
		     (early_qrk[i].vendor == vendor)) &&
		    ((early_qrk[i].device == PCI_ANY_ID) ||
		     (early_qrk[i].device == device)) &&
		    (!((early_qrk[i].class ^ class) &
		       early_qrk[i].class_mask))) {
			if ((early_qrk[i].flags &
			     QFLAG_DONE) != QFLAG_DONE)
				early_qrk[i].f(num, slot, func);
			early_qrk[i].flags |= QFLAG_APPLIED;
		}
	}

	/* Bit 7 of the header type indicates a multi-function device */
	type = read_pci_config_byte(num, slot, func,
				    PCI_HEADER_TYPE);
	if (!(type & 0x80))
		return -1;

	return 0;
}

void __init early_quirks(void)
{
	int slot, func;

	if (!early_pci_allowed())
		return;

	/* Poor man's PCI discovery */
	/* Only scan the root bus */
	for (slot = 0; slot < 32; slot++)
		for (func = 0; func < 8; func++) {
			/* Only probe function 0 on single fn devices */
			if (check_dev_quirk(0, slot, func))
				break;
		}
}