#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
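	/*
	 * Quick reference on the initializers below (roughly, per the
	 * usual x86 segment-descriptor layout assumed by GDT_ENTRY_INIT):
	 * the low byte of the first argument is the access byte
	 * (present, DPL, code/data type) and its high nibble carries the
	 * granularity and D/L size bits.  For example, 0xc09b with limit
	 * 0xfffff is a flat 4 GB, 32-bit, DPL-0 code segment, while
	 * 0xa09b is the 64-bit (L bit) kernel code segment.
	 */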
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits; the
	 * transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_xsave_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	setup_clear_cpu_cap(X86_FEATURE_AVX);
	setup_clear_cpu_cap(X86_FEATURE_AVX2);
	return 1;
}
__setup("noxsave", x86_xsave_setup);

static int __init x86_xsaveopt_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	return 1;
}
__setup("noxsaveopt", x86_xsaveopt_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
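	/*
	 * The asm below saves EFLAGS, flips the requested flag bit,
	 * writes the result back with popfl, re-reads EFLAGS and then
	 * restores the original value.  If the bit could actually be
	 * toggled (f1 ^ f2 has it set), the flag is changeable; this is
	 * how EFLAGS.ID signals CPUID support.
	 */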
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	printk(KERN_NOTICE "CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		set_in_cr4(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags;

	/* This should have been cleared long ago */
	raw_local_save_flags(eflags);
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		set_in_cr4(X86_CR4_SMAP);
#else
		clear_in_cr4(X86_CR4_SMAP);
#endif
	}
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		printk(KERN_WARNING
		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
		       x86_cap_flags[df->feature], df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS];
__u32 cpu_caps_set[NCAPINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
	load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */

	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/*
	 * Intel chips right-justify this string for some dumb reason;
	 * undo that brain damage:
	 */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * tlb_flushall_shift shows the balance point in replacing a cr3 write
 * with multiple 'invlpg'.  The replacement is done when
 * flush_tlb_lines <= active_lines/2^tlb_flushall_shift.
 * If tlb_flushall_shift is -1, the replacement is disabled.
 */
s8 __read_mostly tlb_flushall_shift = -1;

void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n"
		"Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n"
		"tlb_flushall_shift: %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
		tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
		tlb_lld_1g[ENTRIES], tlb_flushall_shift);
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
		printed = 1;
	}
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	printk_once(KERN_ERR
			"CPU: vendor_id '%s' unknown, using generic init.\n" \
			"CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
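	/*
	 * CPUID leaf 1, EAX ("tfms") packs stepping[3:0], model[7:4] and
	 * family[11:8], plus extended model[19:16] and extended
	 * family[27:20] used for family 0x6/0xf parts.  As an
	 * illustrative example (not a value taken from this code),
	 * EAX = 0x000306a9 decodes to family 0x6, model 0x3a, stepping 9
	 * once the extended model bits are shifted in below.
	 */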
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;

		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;

		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		u32 eax, ebx, ecx, edx;

		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

		c->x86_capability[9] = ebx;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;

	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	init_scattered_cpuid_features(c);
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);
	get_cpu_vendor(c);
	get_cpu_cap(c);
	fpu_detect(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	c->cpu_index = 0;
	filter_cpuid_features(c, false);

	if (this_cpu->c_bsp_init)
		this_cpu->c_bsp_init(c);

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect.  In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work.  Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP */
	setup_smep(c);
	setup_smap(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);
	x86_init_rdrand(c);

	/*
	 * Clear/Set all flags overridden by options; this needs to be
	 * done before the capability AND across all CPUs below.
	 */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

#ifdef CONFIG_X86_64
static void vgetcpu_set_mode(void)
{
	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;
}

/* May not be __init: called during resume */
static void syscall32_cpu_init(void)
{
	/*
	 * Load these always in case some future AMD CPU supports
	 * SYSENTER from compat mode too.
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

	wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
#endif

#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	int cpu = get_cpu();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	if (!boot_cpu_has(X86_FEATURE_SEP)) {
		put_cpu();
		return;
	}

	tss->x86_tss.ss1 = __KERNEL_CS;
	tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
	put_cpu();
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#else
	vgetcpu_set_mode();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned	min;
	unsigned	max;
};

static const struct msr_range msr_range_array[] = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __print_cpu_msr(void)
{
	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;

		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr;

static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", strim(c->x86_model_id));
	else
		printk(KERN_CONT "%d86", c->x86);

	printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask);
	else
		printk(KERN_CONT ")\n");

	print_cpu_msr(c);
}

void print_cpu_msr(struct cpuinfo_x86 *c)
{
	if (c->cpu_index < show_msr)
		__print_cpu_msr();
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

DEFINE_PER_CPU(unsigned long, kernel_stack) =
	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
				    (unsigned long) debug_idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;

/*
 * The following four percpu variables are hot.  Align current_task to
 * cacheline size such that all four fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register.  STAR allows
	 * setting CS/DS but only a 32bit target; LSTAR sets the 64bit rip.
	 */
	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC);
}

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

int is_debug_stack(unsigned long addr)
{
	return __get_cpu_var(debug_stack_usage) ||
		(addr <= __get_cpu_var(debug_stack_addr) &&
		 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);

DEFINE_PER_CPU(u32, debug_idt_ctr);

void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit.
 */
#ifdef CONFIG_X86_64

void cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu;
	int i;

	/*
	 * Load microcode on this cpu if a valid microcode is available.
	 * This is the early microcode loading procedure.
	 */
	load_ucode_ap();

	cpu = stack_smp_processor_id();
	t = &per_cpu(init_tss, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
		panic("CPU#%d already initialized!\n", cpu);

	pr_debug("Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	enable_x2apic();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);

		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();

	if (is_uv_system())
		uv_cpu_init();
}

#else

void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	show_ucode_info_early();

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;)
			local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_current_idt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();
}
#endif

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
void warn_pre_alternatives(void)
{
	WARN(1, "You're using static_cpu_has before alternatives have run!\n");
"You're using static_cpu_has before alternatives have run!\n"); 1423 } 1424 EXPORT_SYMBOL_GPL(warn_pre_alternatives); 1425 #endif 1426 1427 inline bool __static_cpu_has_safe(u16 bit) 1428 { 1429 return boot_cpu_has(bit); 1430 } 1431 EXPORT_SYMBOL_GPL(__static_cpu_has_safe); 1432