#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_32
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 *	http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to bump the
 * chip stepping when fixing the bug but they also tweaked some
 * performance at the same time..
 */

/* Empty function; used below to time a loop of indirect calls */
extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}

static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20 - PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2 - d;

		if (d > 20 * K6_BUG_LOOP)
			printk("system stability may be impaired when more than 32 MB are used.\n");
		else
			printk("probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508 MB */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l & 0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1 << 0) | ((mbytes / 4) << 1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d MB\n",
			       mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */
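		/*
		 * "New style" WHCR encoding, as programmed below: the
		 * write-allocate limit lives in bits 31:22 in units of
		 * 4 MB (hence the 4092 MB cap) and bit 16 is set to
		 * enable it.
		 */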

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l & 0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes >> 2) << 22) | (1 << 16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d MB\n",
			       mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* Are we being called from identify_secondary_cpu()? */
	if (c->cpu_index == boot_cpu_id)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
#endif
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15 needs to be 0 to enable SSE
	 * on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
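			/*
			 * Keep the BIOS-programmed low 20 bits and replace
			 * only the top 12 bits with 0x200.
			 */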
			printk(KERN_INFO
			       "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
			       l, (l & 0x000fffff) | 0x20000000);
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff) | 0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish
 * the cores. Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;

	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits) - 1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
#endif
}

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume they
		 *   are all increased by a constant offset, but in the same
		 *   order as the HT nodeids. If that doesn't result in a
		 *   usable node fall back to the path for the previous case.
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
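
/*
 * Worked example (hypothetical quad-core part): CPUID 0x80000008 ECX
 * reporting 3 in bits 7:0 gives x86_max_cores = 4, and 2 in bits 15:12
 * gives x86_coreid_bits = 2; amd_detect_cmp() then decodes an initial
 * APIC ID of 0x13 as cpu_core_id = 0x13 & 3 = 3 and
 * phys_proc_id = 0x13 >> 2 = 4.
 */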

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is CPUID 8000_0007 EDX. Bit 8 set means the TSC
	 * runs at a constant rate across P/T-state changes and does not
	 * stop in deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (c->x86 >= 0xf) {
		unsigned int val;
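		/*
		 * Bus 0, slot 0x18, function 0 is the K8/Fam10h
		 * northbridge; per the BKDG, F0x68 is its HT Link
		 * Transaction Control register, and bits 17/18
		 * (ApicExtBrdCst/ApicExtId) signal that extended
		 * APIC IDs are in use.
		 */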
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable the TLB flush filter by setting HWCR.FFDIS on K8:
	 * bit 6 of MSR C001_0015.
	 *
	 * Erratum 63 for SH-B3 steppings
	 * Erratum 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 of extended CPUID (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	}
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/*
			 * Should distinguish models here, but this is only
			 * a fallback anyway.
			 */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	if (c->extended_cpuid_level >= 0x80000006) {
		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf && c->x86 <= 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			/*
			 * Remap with 4k pages if TSEG lies within the low
			 * direct mapping, or within the high mapping but
			 * at or above 4 GB.
			 */
			if ((tseg >> PMD_SHIFT) <
			    (max_low_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)) ||
			    ((tseg >> PMD_SHIFT) <
			     (max_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)) &&
			     (tseg >> PMD_SHIFT) >= (1ULL << (32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
					     unsigned int size)
{
	/* AMD erratum T13 (order #21922) */
	if (c->x86 == 6) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			size = 64;
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
			size = 256;
	}
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);