#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
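	 *
	 * GDT_ENTRY_INIT(flags, base, limit) packs the descriptor's access
	 * byte and flags nibble into "flags": e.g. 0xc09b below decodes as
	 * G=1 (4K granularity), D/B=1 (32-bit), P=1, DPL=0, S=1, type 0xb
	 * (execute/read code, accessed), while 0xa09b sets L=1/D=0 instead,
	 * i.e. a 64-bit code segment.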
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * Their code and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_mpx_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_MPX))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_MPX);
	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
	return 1;
}
__setup("nompx", x86_mpx_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID.  Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
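	 *
	 * The sequence saves EFLAGS, toggles the requested bit in a copy,
	 * writes the copy back to EFLAGS and reads EFLAGS again: if the
	 * bit stuck, (f1 ^ f2) has it set and the flag is changeable.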
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	printk(KERN_NOTICE "CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		printk(KERN_WARNING
		       "CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
		       x86_cap_flag(df->feature), df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the model
 * name itself; in particular, if CPUID levels 0x80000002..4 are supported,
 * this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS];
__u32 cpu_caps_set[NCAPINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
	load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */

	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/*
	 * Allow user to override all this if necessary.
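	 * (this is the "cachesize=" boot parameter handled by
	 * cachesize_setup() above).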
	 */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
		printed = 1;
	}
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	printk_once(KERN_ERR
			"CPU: vendor_id '%s' unknown, using generic init.\n" \
			"CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_mask	= x86_stepping(tfms);

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}
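
/*
 * Fill c->x86_capability[] from the various CPUID leaves.  Each CPUID_*
 * index below names the (leaf, subleaf, register) a capability word was
 * read from; the cpuid_leafs enum in cpufeature.h maps X86_FEATURE_* bit
 * numbers onto these words.
 */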
void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_7_0_EBX] = ebx;

		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* Additional Intel-defined flags: level 0x0000000F */
	if (c->cpuid_level >= 0x0000000F) {

		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_F_0_EDX] = edx;

		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
			/* will be overridden if occupancy monitoring exists */
			c->x86_cache_max_rmid = ebx;

			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_F_1_EDX] = edx;

			if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
				c->x86_cache_max_rmid = ecx;
				c->x86_cache_occ_scale = ebx;
			}
		} else {
			c->x86_cache_max_rmid = -1;
			c->x86_cache_occ_scale = -1;
		}
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
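 * Secondary CPUs are identified later, via identify_secondary_cpu() ->
 * identify_cpu() -> generic_identify().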
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);
	get_cpu_vendor(c);
	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	c->cpu_index = 0;
	filter_cpuid_features(c, false);

	if (this_cpu->c_bsp_init)
		this_cpu->c_bsp_init(c);

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
	fpu__init_system(c);
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect.  In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work.  Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_SMP
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_nopl(c);
}

static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
	/*
	 * The heavy lifting of max_rmid and cache_occ_scale are handled
	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
	 * in case CQM bits really aren't there in this CPU.
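	 * boot_cpu_data thus ends up holding the minimum max_rmid seen
	 * across all CPUs.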
	 */
	if (c != &boot_cpu_data) {
		boot_cpu_data.x86_cache_max_rmid =
			min(boot_cpu_data.x86_cache_max_rmid,
			    c->x86_cache_max_rmid);
	}
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP */
	setup_smep(c);
	setup_smap(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);
	x86_init_rdrand(c);
	x86_init_cache_qos(c);

	/*
	 * Clear/Set all flags overridden by options again: the vendor
	 * callbacks above may have changed them, and this must happen
	 * before the SMP "common caps AND" step below.
	 */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/*
		 * OR, i.e.
		 * replicate the bug flags
		 */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss, cpu);

	if (!boot_cpu_has(X86_FEATURE_SEP))
		goto out;

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);

	wrmsr(MSR_IA32_SYSENTER_ESP,
	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
	      0);

	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

out:
	put_cpu();
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned	min;
	unsigned	max;
};

static const struct msr_range msr_range_array[] = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __print_cpu_msr(void)
{
	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;

		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr;

static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk(KERN_CONT "%d86", c->x86);

	printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
	else
		printk(KERN_CONT ")\n");

	print_cpu_msr(c);
}

void print_cpu_msr(struct cpuinfo_x86 *c)
{
	if (c->cpu_index < show_msr)
		__print_cpu_msr();
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
				    (unsigned long) debug_idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;

/*
 * The following percpu variables are hot.  Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a somewhat strange symbiosis.
	 * They both write to the same internal register.  STAR allows
	 * setting CS/DS, but only a 32-bit target; LSTAR sets the 64-bit RIP.
	 */
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

#ifdef CONFIG_IA32_EMULATION
	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates
	 * MSR_IA32_SYSENTER_EIP.  This does not cause SYSENTER to jump
	 * to the wrong location, because AMD doesn't allow SYSENTER in
	 * long mode (either 32- or 64-bit).
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
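 * (The live IST pointers are in each CPU's TSS; cpu_init() below fills
 * both the TSS and orig_ist from exception_stacks.)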
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

int is_debug_stack(unsigned long addr)
{
	return __this_cpu_read(debug_stack_usage) ||
		(addr <= __this_cpu_read(debug_stack_addr) &&
		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);

DEFINE_PER_CPU(u32, debug_idt_ctr);

void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
 * the top of the kernel stack.  Use an extra percpu variable to track the
 * top of the kernel stack directly.
 */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else	/* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif	/* ! CONFIG_KGDB */

static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	/*
	 * wait for ACK from master CPU before continuing
	 * with AP initialization
	 */
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64-bit.
 */
#ifdef CONFIG_X86_64

void cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu = stack_smp_processor_id();
	int i;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	/*
	 * Load microcode on this cpu if a valid microcode is available.
	 * This is the early microcode loading procedure.
	 */
	load_ucode_ap();

	t = &per_cpu(cpu_tss, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	pr_debug("Initializing CPU#%d\n", cpu);

	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	x2apic_setup();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);

		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	if (is_uv_system())
		uv_cpu_init();
}

#else

void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
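	 * (The shadow is a per-cpu cached copy of CR4 that lets
	 * cr4_set_bits()/cr4_clear_bits() avoid reading the real register.)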
	 */
	cr4_init_shadow();

	show_ucode_info_early();

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_feature_enabled(X86_FEATURE_VME) ||
	    cpu_has_tsc ||
	    boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_current_idt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_mm_ldt(&init_mm);

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();
}
#endif

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
void warn_pre_alternatives(void)
{
	WARN(1, "You're using static_cpu_has before alternatives have run!\n");
}
EXPORT_SYMBOL_GPL(warn_pre_alternatives);
#endif

inline bool __static_cpu_has_safe(u16 bit)
{
	return boot_cpu_has(bit);
}
EXPORT_SYMBOL_GPL(__static_cpu_has_safe);

static void bsp_resume(void)
{
	if (this_cpu->c_bsp_resume)
		this_cpu->c_bsp_resume(&boot_cpu_data);
}

static struct syscore_ops cpu_syscore_ops = {
	.resume		= bsp_resume,
};

static int __init init_cpu_syscore(void)
{
	register_syscore_ops(&cpu_syscore_ops);
	return 0;
}
core_initcall(init_cpu_syscore);