/* Various workarounds for chipset bugs.
   This code runs very early and can't use the regular PCI subsystem.
   The entries are keyed to PCI bridges which usually identify chipsets
   uniquely.
   This is only for whole classes of chipsets with specific problems which
   need early invasive action (e.g. before the timers are initialized).
   Most PCI device specific workarounds can be done later and should be
   in standard PCI quirks.
   Mainboard specific bugs should be handled by DMI entries.
   CPU specific bugs in setup.c. */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci_ids.h>
#include <drm/i915_drm.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/irq_remapping.h>

static void __init fix_hypertransport_config(int num, int slot, int func)
{
	u32 htcfg;
	/*
	 * We found a HyperTransport bus.  Make sure that we are
	 * broadcasting interrupts to all CPUs on the HT bus if we're
	 * using extended APIC IDs.
	 */
	htcfg = read_pci_config(num, slot, func, 0x68);
	if (htcfg & (1 << 18)) {
		printk(KERN_INFO "Detected use of extended apic ids "
				 "on hypertransport bus\n");
		if ((htcfg & (1 << 17)) == 0) {
			printk(KERN_INFO "Enabling hypertransport extended "
					 "apic interrupt broadcast\n");
			printk(KERN_INFO "Note this is a bios bug, "
					 "please contact your hw vendor\n");
			htcfg |= (1 << 17);
			write_pci_config(num, slot, func, 0x68, htcfg);
		}
	}
}

static void __init via_bugs(int num, int slot, int func)
{
#ifdef CONFIG_GART_IOMMU
	if ((max_pfn > MAX_DMA32_PFN || force_iommu) &&
	    !gart_iommu_aperture_allowed) {
		printk(KERN_INFO
		       "Looks like a VIA chipset. Disabling IOMMU."
		       " Override with iommu=allowed\n");
		gart_iommu_aperture_disabled = 1;
	}
#endif
}

#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC

/* Dummy handler: we only care whether an HPET table is present at all. */
static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
	return 0;
}
#endif /* CONFIG_X86_IO_APIC */
#endif /* CONFIG_ACPI */

static void __init nvidia_bugs(int num, int slot, int func)
{
#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC
	/*
	 * All timer overrides on Nvidia are
	 * wrong unless HPET is enabled.
	 * Unfortunately that's not true on many Asus boards.
	 * We don't know yet how to detect this automatically, but
	 * at least allow a command line override.
	 */
	if (acpi_use_timer_override)
		return;

	if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
		acpi_skip_timer_override = 1;
		printk(KERN_INFO "Nvidia board "
		       "detected. Ignoring ACPI "
		       "timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
#endif
#endif
	/* RED-PEN skip them on mptables too? */

}
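
/*
 * The read_pci_config*()/write_pci_config*() helpers used by all of the
 * quirks in this file come from <asm/pci-direct.h> and issue type 1
 * configuration cycles directly on I/O ports 0xCF8/0xCFC, roughly:
 *
 *	outl(0x80000000 | (bus << 16) | (slot << 11) | (func << 8) |
 *	     (offset & ~3), 0xCF8);
 *	val = inl(0xCFC);
 *
 * No struct pci_dev or PCI core state is involved, which is what makes it
 * safe to run these quirks before the PCI subsystem is initialized.
 */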

#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
{
	u32 d;
	u8  b;

	b = read_pci_config_byte(num, slot, func, 0xac);
	b &= ~(1<<5);
	write_pci_config_byte(num, slot, func, 0xac, b);

	d = read_pci_config(num, slot, func, 0x70);
	d |= 1<<8;
	write_pci_config(num, slot, func, 0x70, d);

	d = read_pci_config(num, slot, func, 0x8);
	d &= 0xff;
	return d;
}

static void __init ati_bugs(int num, int slot, int func)
{
	u32 d;
	u8  b;

	if (acpi_use_timer_override)
		return;

	d = ati_ixp4x0_rev(num, slot, func);
	if (d < 0x82)
		acpi_skip_timer_override = 1;
	else {
		/*
		 * Check for IRQ0 interrupt swap (PM register 0x72, read
		 * via the SBx00 index/data port pair at 0xcd6/0xcd7).
		 */
		outb(0x72, 0xcd6); b = inb(0xcd7);
		if (!(b & 0x2))
			acpi_skip_timer_override = 1;
	}

	if (acpi_skip_timer_override) {
		printk(KERN_INFO "SB4X0 revision 0x%x\n", d);
		printk(KERN_INFO "Ignoring ACPI timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
}

static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
	u32 d;

	d = read_pci_config(num, slot, func, 0x8);
	d &= 0xff;

	return d;
}

static void __init ati_bugs_contd(int num, int slot, int func)
{
	u32 d, rev;

	rev = ati_sbx00_rev(num, slot, func);
	if (rev >= 0x40)
		acpi_fix_pin2_polarity = 1;

	/*
	 * SB600: revisions 0x11, 0x12, 0x13, 0x14, ...
	 * SB700: revisions 0x39, 0x3a, ...
	 * SB800: revisions 0x40, 0x41, ...
	 */
	if (rev >= 0x39)
		return;

	if (acpi_use_timer_override)
		return;

	/* check for IRQ0 interrupt swap */
	d = read_pci_config(num, slot, func, 0x64);
	if (!(d & (1<<14)))
		acpi_skip_timer_override = 1;

	if (acpi_skip_timer_override) {
		printk(KERN_INFO "SB600 revision 0x%x\n", rev);
		printk(KERN_INFO "Ignoring ACPI timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
}
#else
static void __init ati_bugs(int num, int slot, int func)
{
}

static void __init ati_bugs_contd(int num, int slot, int func)
{
}
#endif

static void __init intel_remapping_check(int num, int slot, int func)
{
	u8 revision;
	u16 device;

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
	revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);

	/*
	 * Revision <= 0x13 of all triggering device IDs in this quirk
	 * have a problem draining interrupts when irq remapping is
	 * enabled, and should be flagged as broken.  Additionally
	 * revision 0x22 of device ID 0x3405 has this problem.
	 */
	if (revision <= 0x13)
		set_irq_remapping_broken();
	else if (device == 0x3405 && revision == 0x22)
		set_irq_remapping_broken();
}
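
/*
 * Note that set_irq_remapping_broken() only records the problem; whether
 * and how interrupt remapping is then avoided or worked around is decided
 * later by the IOMMU/interrupt-remapping initialization code.
 */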

/*
 * Systems with Intel graphics controllers set aside memory exclusively
 * for gfx driver use.  This memory is not marked in the E820 as reserved
 * or as RAM, and so is subject to overlap from E820 manipulation later
 * in the boot process.  On some systems, MMIO space is allocated on top,
 * despite the efforts of the "RAM buffer" approach, which simply rounds
 * memory boundaries up to 64M to try to catch space that may decode
 * as RAM and so is not suitable for MMIO.
 *
 * And yes, so far on current devices the base addr is always under 4G.
 */
static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	u32 base;

	/*
	 * For the PCI IDs in this quirk, the stolen base is always
	 * in 0x5c, aka the BDSM register (yes that's really what
	 * it's called).
	 */
	base = read_pci_config(num, slot, func, 0x5c);
	base &= ~((1<<20) - 1);

	return base;
}

#define KB(x) ((x) * 1024UL)
#define MB(x) (KB (KB (x)))
#define GB(x) (MB (KB (x)))

static size_t __init i830_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
		return 0;

	if (tmp & I830_TSEG_SIZE_1M)
		return MB(1);
	else
		return KB(512);
}

static size_t __init i845_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
		return 0;

	switch (tmp & I845_TSEG_SIZE_MASK) {
	case I845_TSEG_SIZE_512K:
		return KB(512);
	case I845_TSEG_SIZE_1M:
		return MB(1);
	default:
		WARN_ON(1);
		return 0;
	}
}

static size_t __init i85x_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
		return 0;

	return MB(1);
}

static size_t __init i830_mem_size(void)
{
	return read_pci_config_byte(0, 0, 0, I830_DRB3) * MB(32);
}

static size_t __init i85x_mem_size(void)
{
	return read_pci_config_byte(0, 0, 1, I85X_DRB3) * MB(32);
}

/*
 * On 830/845/85x the stolen memory base isn't available in any
 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
 */
static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	return i830_mem_size() - i830_tseg_size() - stolen_size;
}

static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	return i830_mem_size() - i845_tseg_size() - stolen_size;
}

static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	return i85x_mem_size() - i85x_tseg_size() - stolen_size;
}

static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	/*
	 * FIXME is the graphics stolen memory region
	 * always at TOUD? I.e. is it always the last
	 * one to be allocated by the BIOS?
	 */
	return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
}

static size_t __init i830_stolen_size(int num, int slot, int func)
{
	size_t stolen_size;
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);

	switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
	case I830_GMCH_GMS_STOLEN_512:
		stolen_size = KB(512);
		break;
	case I830_GMCH_GMS_STOLEN_1024:
		stolen_size = MB(1);
		break;
	case I830_GMCH_GMS_STOLEN_8192:
		stolen_size = MB(8);
		break;
	case I830_GMCH_GMS_LOCAL:
		/* local memory isn't part of the normal address space */
		stolen_size = 0;
		break;
	default:
		return 0;
	}

	return stolen_size;
}

static size_t __init gen3_stolen_size(int num, int slot, int func)
{
	size_t stolen_size;
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);

	switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
	case I855_GMCH_GMS_STOLEN_1M:
		stolen_size = MB(1);
		break;
	case I855_GMCH_GMS_STOLEN_4M:
		stolen_size = MB(4);
		break;
	case I855_GMCH_GMS_STOLEN_8M:
		stolen_size = MB(8);
		break;
	case I855_GMCH_GMS_STOLEN_16M:
		stolen_size = MB(16);
		break;
	case I855_GMCH_GMS_STOLEN_32M:
		stolen_size = MB(32);
		break;
	case I915_GMCH_GMS_STOLEN_48M:
		stolen_size = MB(48);
		break;
	case I915_GMCH_GMS_STOLEN_64M:
		stolen_size = MB(64);
		break;
	case G33_GMCH_GMS_STOLEN_128M:
		stolen_size = MB(128);
		break;
	case G33_GMCH_GMS_STOLEN_256M:
		stolen_size = MB(256);
		break;
	case INTEL_GMCH_GMS_STOLEN_96M:
		stolen_size = MB(96);
		break;
	case INTEL_GMCH_GMS_STOLEN_160M:
		stolen_size = MB(160);
		break;
	case INTEL_GMCH_GMS_STOLEN_224M:
		stolen_size = MB(224);
		break;
	case INTEL_GMCH_GMS_STOLEN_352M:
		stolen_size = MB(352);
		break;
	default:
		stolen_size = 0;
		break;
	}

	return stolen_size;
}

static size_t __init gen6_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	return gmch_ctrl << 25; /* 32 MB units */
}

static size_t __init gen8_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
	gmch_ctrl &= BDW_GMCH_GMS_MASK;
	return gmch_ctrl << 25; /* 32 MB units */
}

static size_t __init chv_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments starting at 36MB
	 * (e.g. 0x11 -> 8MB, 0x16 -> 28MB, 0x17 -> 36MB)
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}

struct intel_stolen_funcs {
	size_t (*size)(int num, int slot, int func);
	u32 (*base)(int num, int slot, int func, size_t size);
};

static size_t __init gen9_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
	gmch_ctrl &= BDW_GMCH_GMS_MASK;

	if (gmch_ctrl < 0xf0)
		return gmch_ctrl << 25; /* 32 MB units */
	else
		/* 4MB increments for field values >= 0xf0 (0xf0 -> 4MB) */
		return (gmch_ctrl - 0xf0 + 1) << 22;
}

typedef size_t (*stolen_size_fn)(int num, int slot, int func);

static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
	.base = i830_stolen_base,
	.size = i830_stolen_size,
};

static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
	.base = i845_stolen_base,
	.size = i830_stolen_size,
};

static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
	.base = i85x_stolen_base,
	.size = gen3_stolen_size,
};

static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
	.base = i865_stolen_base,
	.size = gen3_stolen_size,
};

static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = gen3_stolen_size,
};

static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = gen6_stolen_size,
};

static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = gen8_stolen_size,
};

static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = gen9_stolen_size,
};

static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = chv_stolen_size,
};

static const struct pci_device_id intel_stolen_ids[] __initconst = {
	INTEL_I830_IDS(&i830_stolen_funcs),
	INTEL_I845G_IDS(&i845_stolen_funcs),
	INTEL_I85X_IDS(&i85x_stolen_funcs),
	INTEL_I865G_IDS(&i865_stolen_funcs),
	INTEL_I915G_IDS(&gen3_stolen_funcs),
	INTEL_I915GM_IDS(&gen3_stolen_funcs),
	INTEL_I945G_IDS(&gen3_stolen_funcs),
	INTEL_I945GM_IDS(&gen3_stolen_funcs),
	INTEL_VLV_M_IDS(&gen6_stolen_funcs),
	INTEL_VLV_D_IDS(&gen6_stolen_funcs),
	INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
	INTEL_I965G_IDS(&gen3_stolen_funcs),
	INTEL_G33_IDS(&gen3_stolen_funcs),
	INTEL_I965GM_IDS(&gen3_stolen_funcs),
	INTEL_GM45_IDS(&gen3_stolen_funcs),
	INTEL_G45_IDS(&gen3_stolen_funcs),
	INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
	INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
	INTEL_SNB_D_IDS(&gen6_stolen_funcs),
	INTEL_SNB_M_IDS(&gen6_stolen_funcs),
	INTEL_IVB_M_IDS(&gen6_stolen_funcs),
	INTEL_IVB_D_IDS(&gen6_stolen_funcs),
	INTEL_HSW_D_IDS(&gen6_stolen_funcs),
	INTEL_HSW_M_IDS(&gen6_stolen_funcs),
	INTEL_BDW_M_IDS(&gen8_stolen_funcs),
	INTEL_BDW_D_IDS(&gen8_stolen_funcs),
	INTEL_CHV_IDS(&chv_stolen_funcs),
	INTEL_SKL_IDS(&gen9_stolen_funcs),
};
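
/*
 * Each INTEL_*_IDS() entry above stashes a pointer to the matching
 * intel_stolen_funcs structure in pci_device_id.driver_data.
 * intel_graphics_stolen() below compares device IDs only; the vendor
 * (Intel) and class (VGA) have already been matched by the early_qrk[]
 * entry that routes execution here.
 */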

static void __init intel_graphics_stolen(int num, int slot, int func)
{
	size_t size;
	int i;
	u32 start;
	u16 device, subvendor, subdevice;

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
	subvendor = read_pci_config_16(num, slot, func,
				       PCI_SUBSYSTEM_VENDOR_ID);
	subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);

	for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
		if (intel_stolen_ids[i].device == device) {
			const struct intel_stolen_funcs *stolen_funcs =
				(const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
			size = stolen_funcs->size(num, slot, func);
			start = stolen_funcs->base(num, slot, func, size);
			if (size && start) {
				printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
				       start, start + (u32)size - 1);
				/* Mark this space as reserved */
				e820_add_region(start, size, E820_RESERVED);
				sanitize_e820_map(e820.map,
						  ARRAY_SIZE(e820.map),
						  &e820.nr_map);
			}
			return;
		}
	}
}

static void __init force_disable_hpet(int num, int slot, int func)
{
#ifdef CONFIG_HPET_TIMER
	boot_hpet_disable = 1;
	pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
#endif
}

#define QFLAG_APPLY_ONCE	0x1
#define QFLAG_APPLIED		0x2
#define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
struct chipset {
	u32 vendor;
	u32 device;
	u32 class;
	u32 class_mask;
	u32 flags;
	void (*f)(int num, int slot, int func);
};

/*
 * Only works for devices on the root bus.  If you add any devices
 * not on bus 0, add another loop level in early_quirks().  But
 * be careful because at least the Nvidia quirk here relies on
 * only matching on bus 0.
 */
static struct chipset early_qrk[] __initdata = {
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
	{ PCI_VENDOR_ID_VIA, PCI_ANY_ID,
	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs },
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config },
	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
	{ PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
	{ PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
	{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
	/*
	 * HPET on the current version of the Baytrail platform has accuracy
	 * problems: disable it for now.
	 */
	{ PCI_VENDOR_ID_INTEL, 0x0f00,
	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet },
	{}
};
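
/*
 * Matching rules for the table above: vendor and device are compared
 * directly, with PCI_ANY_ID acting as a wildcard, and the class is only
 * compared under class_mask.  Handlers flagged QFLAG_APPLY_ONCE run for
 * the first matching device only; all others run for every match.
 */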

/**
 * check_dev_quirk - apply early quirks to a given PCI device
 * @num: bus number
 * @slot: slot number
 * @func: PCI function
 *
 * Check the vendor & device ID against the early quirks table.
 *
 * If the device is single function, let early_quirks() know so we don't
 * poke at this device again.
 */
static int __init check_dev_quirk(int num, int slot, int func)
{
	u16 class;
	u16 vendor;
	u16 device;
	u8 type;
	int i;

	class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);

	if (class == 0xffff)
		return -1; /* no class, treat as single function */

	vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID);

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);

	for (i = 0; early_qrk[i].f != NULL; i++) {
		if (((early_qrk[i].vendor == PCI_ANY_ID) ||
		    (early_qrk[i].vendor == vendor)) &&
		    ((early_qrk[i].device == PCI_ANY_ID) ||
		    (early_qrk[i].device == device)) &&
		    (!((early_qrk[i].class ^ class) &
		    early_qrk[i].class_mask))) {
			if ((early_qrk[i].flags &
			     QFLAG_DONE) != QFLAG_DONE)
				early_qrk[i].f(num, slot, func);
			early_qrk[i].flags |= QFLAG_APPLIED;
		}
	}

	type = read_pci_config_byte(num, slot, func,
				    PCI_HEADER_TYPE);
	/* Bit 7 of the header type is the multi-function flag. */
	if (!(type & 0x80))
		return -1;

	return 0;
}

void __init early_quirks(void)
{
	int slot, func;

	if (!early_pci_allowed())
		return;

	/* Poor man's PCI discovery */
	/* Only scan the root bus */
	for (slot = 0; slot < 32; slot++)
		for (func = 0; func < 8; func++) {
			/* Only probe function 0 on single fn devices */
			if (check_dev_quirk(0, slot, func))
				break;
		}
}