#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/uaccess.h>

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return;
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
}

/*
 * Early probe support logic for ppro memory erratum #50.
 *
 * This is called before we do cpu ident work.
 */

int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Erratum 50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}


/*
 * P4 Xeon erratum 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 */
static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & (1<<9)) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Erratum 037)\n");
			lo |= (1<<9);	/* Disable hw prefetching */
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}
}


/*
 * Find out the number of processor cores on the die.
 */
static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}

#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif
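/*
 * For context: the other half of the F00F workaround lives in the page
 * fault handler.  With the IDT aliased read-only above, the erratum's
 * lockup becomes an ordinary protection fault, which the handler can
 * recognise by its faulting address and convert back into the trap the
 * CPU was trying to take.  A simplified sketch of that check follows
 * (the real code is in arch/i386/mm/fault.c; details differ between
 * kernel versions):
 */
#if 0
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		/* Which IDT entry does the faulting address fall in? */
		nr = (address - idt_descr.address) >> 3;
		if (nr == 6) {		/* vector 6: invalid opcode */
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif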
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;
	char *p = NULL;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology
	 * CPUs have the F0 0F bug, which lets nonprivileged users lock up
	 * the system.  Note that the workaround only needs to be
	 * initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	select_idle_routine(c);
	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
	}

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
	if ((c->x86 << 8 | c->x86_model << 4 | c->x86_mask) < 0x633)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	/*
	 * Names for the Pentium II/Celeron processors detectable only by
	 * also checking the cache size.  Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if (p)
		strcpy(c->x86_model_id, p);

	c->x86_max_cores = num_cpu_cores(c);

	detect_ht(c);

	/* Work around errata */
	Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves.
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	if (c->x86 == 15) {
		set_bit(X86_FEATURE_P4, c->x86_capability);
		set_bit(X86_FEATURE_SYNC_RDTSC, c->x86_capability);
	}
	if (c->x86 == 6)
		set_bit(X86_FEATURE_P3, c->x86_capability);
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);

	if (cpu_has_ds) {
		unsigned int l1;
		/* l2 is dead by now; reuse it for the MSR high word. */
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}
}
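/*
 * For reference: the movsl_mask set up above is consumed by the usercopy
 * helpers, which fall back to an unrolled copy when source and
 * destination are not mutually aligned to the CPU's preferred boundary.
 * Roughly (simplified sketch of arch/i386/lib/usercopy.c; exact shape
 * varies by kernel version):
 */
#if 0
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2,
				unsigned long n)
{
	/*
	 * movsl is only worth it for larger copies, and only when both
	 * pointers share the alignment this CPU model prefers.
	 */
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
	return 1;
}
#endif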
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin.  This comes in two flavours: one has 256kb
	 * of cache, the other 512kb.  We have no way to determine which,
	 * so we use a boot-time override for the 512kb model and assume
	 * 256kb otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}

static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_init		= init_intel,
	.c_size_cache	= intel_size_cache,
};

__init int intel_cpu_init(void)
{
	cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
	return 0;
}

#ifndef CONFIG_X86_CMPXCHG
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
	u8 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386.  Unsuitable for SMP. */
	local_irq_save(flags);
	prev = *(u8 *)ptr;
	if (prev == old)
		*(u8 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);

unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
{
	u16 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386.  Unsuitable for SMP. */
	local_irq_save(flags);
	prev = *(u16 *)ptr;
	if (prev == old)
		*(u16 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u16);

unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
	u32 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386.  Unsuitable for SMP. */
	local_irq_save(flags);
	prev = *(u32 *)ptr;
	if (prev == old)
		*(u32 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u32);
#endif

// arch_initcall(intel_cpu_init);
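/*
 * For reference: on kernels built without CONFIG_X86_CMPXCHG the generic
 * cmpxchg() macro dispatches to the helpers above by operand size.  A
 * simplified sketch of that size-dispatch wrapper (roughly what
 * include/asm-i386/system.h does; the exact shape varies by kernel
 * version):
 */
#if 0
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
#endif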