--- centaur.c (0ca0f16fd17c5d880dd0abbe03595b0c7c5b3c95)
+++ centaur.c (48f4c485c275e9550fa1a1191768689cc3ae0037)
@@ -1,16 +1,16 @@
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/bitops.h>
 
 #include <asm/processor.h>
-#include <asm/msr.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
+#include <asm/msr.h>
 
 #include "cpu.h"
 
 #ifdef CONFIG_X86_OOSTORE
 
 static u32 __cpuinit power2(u32 x)
 {
 	u32 s = 1;
--- 254 unchanged lines hidden ---
@@ -271,28 +271,33 @@
 			printk(KERN_INFO "CPU: Enabled h/w RNG\n");
 		}
 
 		/* store Centaur Extended Feature Flags as
 		 * word 5 of the CPU capability bit array
 		 */
 		c->x86_capability[5] = cpuid_edx(0xC0000001);
 	}
-
+#ifdef CONFIG_X86_32
 	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
 	if (c->x86_model >= 6 && c->x86_model <= 9) {
 		rdmsr(MSR_VIA_FCR, lo, hi);
 		lo |= (1<<1 | 1<<7);
 		wrmsr(MSR_VIA_FCR, lo, hi);
 		set_cpu_cap(c, X86_FEATURE_CX8);
 	}
 
 	/* Before Nehemiah, the C3's had 3dNOW! */
 	if (c->x86_model >= 6 && c->x86_model < 9)
 		set_cpu_cap(c, X86_FEATURE_3DNOW);
+#endif
+	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+	}
 
 	display_cacheinfo(c);
 }
 
 enum {
 	ECX8		= 1<<1,
 	EIERRINT	= 1<<2,
 	DPM		= 1<<3,
--- 12 unchanged lines hidden ---
@@ -311,39 +316,50 @@
 	ERETSTK		= 1<<16,
 	E2MMX		= 1<<19,
 	EAMD3D		= 1<<20,
 };
 
 static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
 {
 	switch (c->x86) {
+#ifdef CONFIG_X86_32
 	case 5:
 		/* Emulate MTRRs using Centaur's MCR. */
 		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
 		break;
+#endif
+	case 6:
+		if (c->x86_model >= 0xf)
+			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+		break;
 	}
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
+#endif
 }
 
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
-
+#ifdef CONFIG_X86_32
 	char *name;
 	u32 fcr_set = 0;
 	u32 fcr_clr = 0;
 	u32 lo, hi, newlo;
 	u32 aa, bb, cc, dd;
 
 	/*
 	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
 	 */
 	clear_cpu_cap(c, 0*32+31);
-
+#endif
+	early_init_centaur(c);
 	switch (c->x86) {
+#ifdef CONFIG_X86_32
 	case 5:
 		switch (c->x86_model) {
 		case 4:
 			name = "C6";
 			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
 			fcr_clr = DPDC;
 			printk(KERN_NOTICE "Disabling bugged TSC.\n");
 			clear_cpu_cap(c, X86_FEATURE_TSC);
--- 87 unchanged lines hidden ---
@@ -437,44 +453,48 @@
 		if (cpuid_eax(0x80000000) >= 0x80000005) {
 			/* Yes, we can. */
 			cpuid(0x80000005, &aa, &bb, &cc, &dd);
 			/* Add L1 data and code cache sizes. */
 			c->x86_cache_size = (cc>>24)+(dd>>24);
 		}
 		sprintf(c->x86_model_id, "WinChip %s", name);
 		break;
-
+#endif
 	case 6:
 		init_c3(c);
 		break;
 	}
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+#endif
 }
 
 static unsigned int __cpuinit
 centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
+#ifdef CONFIG_X86_32
 	/* VIA C3 CPUs (670-68F) need further shifting. */
 	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
 		size >>= 8;
 
 	/*
 	 * There's also an erratum in Nehemiah stepping 1, which
 	 * returns '65KB' instead of '64KB'
 	 * - Note, it seems this may only be in engineering samples.
 	 */
 	if ((c->x86 == 6) && (c->x86_model == 9) &&
 				(c->x86_mask == 1) && (size == 65))
 		size -= 1;
+#endif
 	return size;
 }
 
 static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
 	.c_size_cache	= centaur_size_cache,
 	.c_x86_vendor	= X86_VENDOR_CENTAUR,
 };
 
 cpu_dev_register(centaur_cpu_dev);
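Read as a whole, the right-hand version wraps the WinChip/C6-era FCR setup, the Centaur MCR handling, and the C3 cache-size fixups in CONFIG_X86_32, has init_centaur() call early_init_centaur(), and sets SYSENTER32 and LFENCE_RDTSC under CONFIG_X86_64, so the file is apparently being reworked to serve both 32-bit and 64-bit builds from one source. The feature handling it touches relies on the kernel's convention of addressing CPU capability bits as word*32 + bit within an array of 32-bit words: word 5 receives the whole Centaur extended-flags word from CPUID 0xC0000001, while 0*32+31 and 1*32+31 name bit 31 of words 0 and 1 (the nonstandard versus the extended-CPUID 3DNow bit). The following is a minimal standalone sketch of that convention only; cap_set()/cap_clear()/cap_test(), NCAPWORDS and the numeric sample values are invented for illustration and are not the kernel's set_cpu_cap()/clear_cpu_cap() implementation.

/*
 * Standalone sketch (not kernel code) of the word*32 + bit capability
 * convention used above.  cap_set()/cap_clear()/cap_test() are made-up
 * stand-ins for the kernel's set_cpu_cap()/clear_cpu_cap() helpers,
 * and the numeric values are invented sample data.
 */
#include <stdint.h>
#include <stdio.h>

#define NCAPWORDS 8			/* enough 32-bit words for the demo */

static uint32_t caps[NCAPWORDS];

static void cap_set(unsigned int feature)
{
	caps[feature / 32] |= 1u << (feature % 32);
}

static void cap_clear(unsigned int feature)
{
	caps[feature / 32] &= ~(1u << (feature % 32));
}

static int cap_test(unsigned int feature)
{
	return (caps[feature / 32] >> (feature % 32)) & 1;
}

int main(void)
{
	/* Pretend base CPUID reported the nonstandard 3DNow bit (0*32+31). */
	caps[0] = 0x80000000u;

	/* Word 5 takes a whole CPUID output word at once, as in init_c3(). */
	caps[5] = 0x000c0f1fu;		/* stand-in for cpuid_edx(0xC0000001) */

	/* Trust only the extended-CPUID 3DNow bit, as in init_centaur(). */
	cap_set(1*32 + 31);
	cap_clear(0*32 + 31);

	printf("word0 bit31=%d word1 bit31=%d word5=0x%08x\n",
	       cap_test(0*32 + 31), cap_test(1*32 + 31),
	       (unsigned int)caps[5]);
	return 0;
}

Compiled and run, the sketch prints word0 bit31=0 and word1 bit31=1, showing why clearing 0*32+31 in init_centaur() leaves the extended-CPUID 3DNow bit in word 1 untouched.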