#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/ds.h>

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}


/*
 * P4 Xeon errata 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 */
static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & (1<<9)) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= (1<<9);	/* Disable hw prefetching */
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}
}


/*
 * Find out the number of processor cores on the die.
 */
static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}

#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif
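
/*
 * Background for trap_init_f00f_bug() above (summary added for clarity;
 * the fault-handling half of the workaround lives in do_page_fault()):
 * the F0 0F bug is triggered by the invalid encoding "lock cmpxchg8b
 * %eax" (bytes F0 0F C7 C8).  The CPU raises an invalid opcode exception
 * while the bus lock is still held, and the locked IDT access made to
 * deliver that exception wedges the processor.  With the IDT aliased
 * through a read-only fixmap, that access takes a page fault instead,
 * which breaks the lockup; the page fault handler then recognizes a
 * fault on IDT entry 6 and delivers the invalid opcode trap by hand.
 */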

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;
	char *p = NULL;

	early_init_intel(c);

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.  Note that the workaround should only be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
	}

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if (p)
		strcpy(c->x86_model_id, p);

	c->x86_max_cores = num_cpu_cores(c);

	detect_ht(c);

	/* Work around errata */
	Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves.
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	if (cpu_has_xmm2)
		set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability);
	if (c->x86 == 15)
		set_bit(X86_FEATURE_P4, c->x86_capability);
	if (c->x86 == 6)
		set_bit(X86_FEATURE_P3, c->x86_capability);
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}

	if (cpu_has_bts)
		ds_init_intel(c);
}
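
/*
 * Worked example for the SEP check in init_intel() above (added for
 * clarity): the check packs the CPU signature as
 *
 *	(family << 8) | (model << 4) | stepping
 *
 * so the cutoff 0x633 means family 6, model 3, stepping 3.  A Pentium
 * Pro at 6/1/7 packs to 0x617, which is below the cutoff, so its
 * falsely advertised SEP bit is cleared; a Pentium II at 6/3/3 packs
 * to exactly 0x633 and keeps SEP.
 */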

static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin.  This comes in two flavours.
	 * One has 256KB of cache, the other 512KB.  We have no way
	 * to determine which, so we use a boottime override
	 * for the 512KB model, and assume 256KB otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}

static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_init		= init_intel,
	.c_size_cache	= intel_size_cache,
};

__init int intel_cpu_init(void)
{
	cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
	return 0;
}

#ifndef CONFIG_X86_CMPXCHG
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
	u8 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u8 *)ptr;
	if (prev == old)
		*(u8 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);

unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
{
	u16 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u16 *)ptr;
	if (prev == old)
		*(u16 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u16);

unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
	u32 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u32 *)ptr;
	if (prev == old)
		*(u32 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u32);
#endif

#ifndef CONFIG_X86_CMPXCHG64
unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
{
	u64 prev;
	unsigned long flags;

	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u64 *)ptr;
	if (prev == old)
		*(u64 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_486_u64);
#endif

// arch_initcall(intel_cpu_init);
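
/*
 * Usage note for the cmpxchg fallbacks above (added for clarity; the
 * dispatch described here lives in the <asm/cmpxchg.h> headers of this
 * kernel generation, not in this file): cmpxchg() checks boot_cpu_data
 * at runtime and, when running on a real 386, calls the size-matched
 * cmpxchg_386_u{8,16,32}() helper instead of the CMPXCHG instruction;
 * cmpxchg64() likewise falls back to cmpxchg_486_u64() when CMPXCHG8B
 * is unavailable.  Atomicity comes only from disabling interrupts,
 * which is why these helpers are unsuitable for SMP.
 */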