#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/apic.h>

#include <mach_apic.h>
#include "cpu.h"

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 *	http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

#ifdef CONFIG_X86_LOCAL_APIC
#define ENABLE_C1E_MASK			0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM			0x0ff00000
#define CPUID_XFAM_K8			0x00000000
#define CPUID_XFAM_10H			0x00100000
#define CPUID_XFAM_11H			0x00200000
#define CPUID_XMOD			0x000f0000
#define CPUID_XMOD_REV_F		0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi;
	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
		/* fall through: K8 revision F and later are checked like 10h/11h */
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK) {
			if (smp_processor_id() != boot_cpu_physical_apicid)
				printk(KERN_INFO "AMD C1E detected late. "
				       "Force timer broadcast.\n");
			return 1;
		}
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
#endif

int force_mwait __cpuinitdata;

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	if (cpuid_eax(0x80000000) >= 0x80000007) {
		c->x86_power = cpuid_edx(0x80000007);
		if (c->x86_power & (1<<8))
			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	}
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);
	int r;

#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	r = get_model_name(c);

	switch (c->x86) {
	case 4:
		/*
		 * General Systems BIOSen alias the cpu frequency registers
		 * of the Elan at 0x000df000. Unfortunately, one of the Linux
		 * drivers subsequently pokes it, and changes the CPU speed.
		 * Workaround: Remove the unneeded alias.
		 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
		if (c->x86_model == 9 || c->x86_model == 10) {
			if (inl(CBAR) & CBAR_ENB)
				outl(0 | CBAR_KEY, CBAR);
		}
		break;
	case 5:
		if (c->x86_model < 6) {
			/* Based on AMD doc 20734R - June 2000 */
			if (c->x86_model == 0) {
				clear_cpu_cap(c, X86_FEATURE_APIC);
				set_cpu_cap(c, X86_FEATURE_PGE);
			}
			break;
		}

		if (c->x86_model == 6 && c->x86_mask == 1) {
			const int K6_BUG_LOOP = 1000000;
			int n;
			void (*f_vide)(void);
			unsigned long d, d2;

			printk(KERN_INFO "AMD K6 stepping B detected - ");

			/*
			 * It looks like AMD fixed the 2.6.2 bug and improved indirect
			 * calls at the same time.
			 */

			n = K6_BUG_LOOP;
			f_vide = vide;
			rdtscl(d);
			while (n--)
				f_vide();
			rdtscl(d2);
			d = d2-d;

			if (d > 20*K6_BUG_LOOP)
				printk("system stability may be impaired when more than 32 MB are used.\n");
			else
				printk("probably OK (after B9730xxxx).\n");
			printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
		}

		/* K6 with old style WHCR */
		if (c->x86_model < 8 ||
		    (c->x86_model == 8 && c->x86_mask < 8)) {
			/* We can only write allocate on the low 508Mb */
			if (mbytes > 508)
				mbytes = 508;

			rdmsr(MSR_K6_WHCR, l, h);
			if ((l&0x0000FFFF) == 0) {
				unsigned long flags;
				l = (1<<0)|((mbytes/4)<<1);
				local_irq_save(flags);
				wbinvd();
				wrmsr(MSR_K6_WHCR, l, h);
				local_irq_restore(flags);
				printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
					mbytes);
			}
			break;
		}

		if ((c->x86_model == 8 && c->x86_mask > 7) ||
		     c->x86_model == 9 || c->x86_model == 13) {
			/* The more serious chips .. */

			if (mbytes > 4092)
				mbytes = 4092;

			rdmsr(MSR_K6_WHCR, l, h);
			if ((l&0xFFFF0000) == 0) {
				unsigned long flags;
				l = ((mbytes>>2)<<22)|(1<<16);
				local_irq_save(flags);
				wbinvd();
				wrmsr(MSR_K6_WHCR, l, h);
				local_irq_restore(flags);
				printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
					mbytes);
			}

			/* Set MTRR capability flag if appropriate */
			if (c->x86_model == 13 || c->x86_model == 9 ||
			    (c->x86_model == 8 && c->x86_mask >= 8))
				set_cpu_cap(c, X86_FEATURE_K6_MTRR);
			break;
		}

		if (c->x86_model == 10) {
			/* AMD Geode LX is model 10 */
			/* placeholder for any needed mods */
			break;
		}
		break;
	case 6: /* An Athlon/Duron */

		/*
		 * Bit 15 of Athlon-specific MSR 15 needs to be 0
		 * to enable SSE on Palomino/Morgan/Barton CPUs.
		 * If the BIOS didn't enable it already, enable it here.
		 */
		if (c->x86_model >= 6 && c->x86_model <= 10) {
			if (!cpu_has(c, X86_FEATURE_XMM)) {
				printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
				rdmsr(MSR_K7_HWCR, l, h);
				l &= ~0x00008000;
				wrmsr(MSR_K7_HWCR, l, h);
				set_cpu_cap(c, X86_FEATURE_XMM);
			}
		}
		/*
		 * It's been determined by AMD that Athlons since model 8 stepping 1
		 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx.
		 * As per AMD technical note 27212 0.2
		 */
		if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
			rdmsr(MSR_K7_CLK_CTL, l, h);
			if ((l & 0xfff00000) != 0x20000000) {
				printk(KERN_INFO "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
					l, ((l & 0x000fffff)|0x20000000));
				wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
			}
		}
		break;
	}

	switch (c->x86) {
	case 15:
	/* Use K8 tuning for Fam10h and Fam11h */
	case 0x10:
	case 0x11:
		set_cpu_cap(c, X86_FEATURE_K8);
		break;
	case 6:
		set_cpu_cap(c, X86_FEATURE_K7);
		break;
	}
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	display_cacheinfo(c);

	if (cpuid_eax(0x80000000) >= 0x80000008)
		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;

#ifdef CONFIG_X86_HT
	/*
	 * On an AMD multi-core setup the lower bits of the APIC id
	 * distinguish the cores.
	 */
	if (c->x86_max_cores > 1) {
		int cpu = smp_processor_id();
		/* core ID width from CPUID 0x80000008 ECX[15:12] */
		unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;

		if (bits == 0) {
			while ((1 << bits) < c->x86_max_cores)
				bits++;
		}
		c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
		c->phys_proc_id >>= bits;
		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
		       cpu, c->x86_max_cores, c->cpu_core_id);
	}
#endif

	if (cpuid_eax(0x80000000) >= 0x80000006) {
		if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

#ifdef CONFIG_X86_LOCAL_APIC
	if (amd_apic_timer_broken())
		local_apic_timer_disabled = 1;
#endif

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
}

static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			size = 64;
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
			size = 256;
	}
	return size;
}

static struct cpu_dev amd_cpu_dev __cpuinitdata = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_early_init	= early_init_amd,
	.c_init		= init_amd,
	.c_size_cache	= amd_size_cache,
};

int __init amd_init_cpu(void)
{
	cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
	return 0;
}

cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);