// SPDX-License-Identifier: GPL-2.0-only
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
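	 *
	 * Roughly, GDT_ENTRY_INIT(flags, base, limit) packs the descriptor's
	 * access byte and flag nibble: e.g. 0xc09b is a present, DPL-0,
	 * 32-bit code segment with 4k granularity, so the 0xfffff limit
	 * covers the full 4 GB.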
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_mpx_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_MPX))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_MPX);
	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
	return 1;
}
__setup("nompx", x86_mpx_setup);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
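/*
 * For example (hypothetical value): booting with "cachesize=512" makes
 * cpu_detect_cache_sizes() below report a 512 KB L2 regardless of what
 * the hardware claims; the override is used as the x86_cache_size
 * figure in KB.
 */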
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	/* Check for things that depend on SMEP being enabled: */
	check_mpx_erratum(&boot_cpu_data);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
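	 * A kexec'd kernel inherits CR4 from the kernel it replaced, so
	 * the bit may still be set here even though this kernel has
	 * decided not to use UMIP.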
365 */ 366 cr4_clear_bits(X86_CR4_UMIP); 367 } 368 369 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning); 370 static unsigned long cr4_pinned_bits __ro_after_init; 371 372 void native_write_cr0(unsigned long val) 373 { 374 unsigned long bits_missing = 0; 375 376 set_register: 377 asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order)); 378 379 if (static_branch_likely(&cr_pinning)) { 380 if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) { 381 bits_missing = X86_CR0_WP; 382 val |= bits_missing; 383 goto set_register; 384 } 385 /* Warn after we've set the missing bits. */ 386 WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n"); 387 } 388 } 389 EXPORT_SYMBOL(native_write_cr0); 390 391 void native_write_cr4(unsigned long val) 392 { 393 unsigned long bits_missing = 0; 394 395 set_register: 396 asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits)); 397 398 if (static_branch_likely(&cr_pinning)) { 399 if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) { 400 bits_missing = ~val & cr4_pinned_bits; 401 val |= bits_missing; 402 goto set_register; 403 } 404 /* Warn after we've set the missing bits. */ 405 WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n", 406 bits_missing); 407 } 408 } 409 EXPORT_SYMBOL(native_write_cr4); 410 411 void cr4_init(void) 412 { 413 unsigned long cr4 = __read_cr4(); 414 415 if (boot_cpu_has(X86_FEATURE_PCID)) 416 cr4 |= X86_CR4_PCIDE; 417 if (static_branch_likely(&cr_pinning)) 418 cr4 |= cr4_pinned_bits; 419 420 __write_cr4(cr4); 421 422 /* Initialize cr4 shadow for this CPU. */ 423 this_cpu_write(cpu_tlbstate.cr4, cr4); 424 } 425 426 /* 427 * Once CPU feature detection is finished (and boot params have been 428 * parsed), record any of the sensitive CR bits that are set, and 429 * enable CR pinning. 430 */ 431 static void __init setup_cr_pinning(void) 432 { 433 unsigned long mask; 434 435 mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP); 436 cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask; 437 static_key_enable(&cr_pinning.key); 438 } 439 440 /* 441 * Protection Keys are not available in 32-bit mode. 442 */ 443 static bool pku_disabled; 444 445 static __always_inline void setup_pku(struct cpuinfo_x86 *c) 446 { 447 struct pkru_state *pk; 448 449 /* check the boot processor, plus compile options for PKU: */ 450 if (!cpu_feature_enabled(X86_FEATURE_PKU)) 451 return; 452 /* checks the actual processor's cpuid bits: */ 453 if (!cpu_has(c, X86_FEATURE_PKU)) 454 return; 455 if (pku_disabled) 456 return; 457 458 cr4_set_bits(X86_CR4_PKE); 459 pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU); 460 if (pk) 461 pk->pkru = init_pkru_value; 462 /* 463 * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE 464 * cpuid bit to be set. We need to ensure that we 465 * update that bit in this CPU's "cpu_info". 466 */ 467 get_cpu_cap(c); 468 } 469 470 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 471 static __init int setup_disable_pku(char *arg) 472 { 473 /* 474 * Do not clear the X86_FEATURE_PKU bit. All of the 475 * runtime checks are against OSPKE so clearing the 476 * bit does nothing. 477 * 478 * This way, we will see "pku" in cpuinfo, but not 479 * "ospke", which is exactly what we want. It shows 480 * that the CPU has PKU, but the OS has not enabled it. 481 * This happens to be exactly how a system would look 482 * if we disabled the config option. 
483 */ 484 pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n"); 485 pku_disabled = true; 486 return 1; 487 } 488 __setup("nopku", setup_disable_pku); 489 #endif /* CONFIG_X86_64 */ 490 491 /* 492 * Some CPU features depend on higher CPUID levels, which may not always 493 * be available due to CPUID level capping or broken virtualization 494 * software. Add those features to this table to auto-disable them. 495 */ 496 struct cpuid_dependent_feature { 497 u32 feature; 498 u32 level; 499 }; 500 501 static const struct cpuid_dependent_feature 502 cpuid_dependent_features[] = { 503 { X86_FEATURE_MWAIT, 0x00000005 }, 504 { X86_FEATURE_DCA, 0x00000009 }, 505 { X86_FEATURE_XSAVE, 0x0000000d }, 506 { 0, 0 } 507 }; 508 509 static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) 510 { 511 const struct cpuid_dependent_feature *df; 512 513 for (df = cpuid_dependent_features; df->feature; df++) { 514 515 if (!cpu_has(c, df->feature)) 516 continue; 517 /* 518 * Note: cpuid_level is set to -1 if unavailable, but 519 * extended_extended_level is set to 0 if unavailable 520 * and the legitimate extended levels are all negative 521 * when signed; hence the weird messing around with 522 * signs here... 523 */ 524 if (!((s32)df->level < 0 ? 525 (u32)df->level > (u32)c->extended_cpuid_level : 526 (s32)df->level > (s32)c->cpuid_level)) 527 continue; 528 529 clear_cpu_cap(c, df->feature); 530 if (!warn) 531 continue; 532 533 pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n", 534 x86_cap_flag(df->feature), df->level); 535 } 536 } 537 538 /* 539 * Naming convention should be: <Name> [(<Codename>)] 540 * This table only is used unless init_<vendor>() below doesn't set it; 541 * in particular, if CPUID levels 0x80000002..4 are supported, this 542 * isn't used 543 */ 544 545 /* Look up CPU names by table lookup. */ 546 static const char *table_lookup_model(struct cpuinfo_x86 *c) 547 { 548 #ifdef CONFIG_X86_32 549 const struct legacy_cpu_model_info *info; 550 551 if (c->x86_model >= 16) 552 return NULL; /* Range check */ 553 554 if (!this_cpu) 555 return NULL; 556 557 info = this_cpu->legacy_models; 558 559 while (info->family) { 560 if (info->family == c->x86) 561 return info->model_names[c->x86_model]; 562 info++; 563 } 564 #endif 565 return NULL; /* Not found */ 566 } 567 568 __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; 569 __u32 cpu_caps_set[NCAPINTS + NBUGINTS]; 570 571 void load_percpu_segment(int cpu) 572 { 573 #ifdef CONFIG_X86_32 574 loadsegment(fs, __KERNEL_PERCPU); 575 #else 576 __loadsegment_simple(gs, 0); 577 wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); 578 #endif 579 load_stack_canary_segment(); 580 } 581 582 #ifdef CONFIG_X86_32 583 /* The 32-bit entry code needs to find cpu_entry_area. */ 584 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); 585 #endif 586 587 /* Load the original GDT from the per-cpu structure */ 588 void load_direct_gdt(int cpu) 589 { 590 struct desc_ptr gdt_descr; 591 592 gdt_descr.address = (long)get_cpu_gdt_rw(cpu); 593 gdt_descr.size = GDT_SIZE - 1; 594 load_gdt(&gdt_descr); 595 } 596 EXPORT_SYMBOL_GPL(load_direct_gdt); 597 598 /* Load a fixmap remapping of the per-cpu GDT */ 599 void load_fixmap_gdt(int cpu) 600 { 601 struct desc_ptr gdt_descr; 602 603 gdt_descr.address = (long)get_cpu_gdt_ro(cpu); 604 gdt_descr.size = GDT_SIZE - 1; 605 load_gdt(&gdt_descr); 606 } 607 EXPORT_SYMBOL_GPL(load_fixmap_gdt); 608 609 /* 610 * Current gdt points %fs at the "master" per-cpu area: after this, 611 * it's on the real one. 
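 * (That is the 32-bit picture; on 64-bit the per-cpu base is carried in
 * MSR_GS_BASE instead, as load_percpu_segment() above shows.)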
612 */ 613 void switch_to_new_gdt(int cpu) 614 { 615 /* Load the original GDT */ 616 load_direct_gdt(cpu); 617 /* Reload the per-cpu base */ 618 load_percpu_segment(cpu); 619 } 620 621 static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; 622 623 static void get_model_name(struct cpuinfo_x86 *c) 624 { 625 unsigned int *v; 626 char *p, *q, *s; 627 628 if (c->extended_cpuid_level < 0x80000004) 629 return; 630 631 v = (unsigned int *)c->x86_model_id; 632 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); 633 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); 634 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); 635 c->x86_model_id[48] = 0; 636 637 /* Trim whitespace */ 638 p = q = s = &c->x86_model_id[0]; 639 640 while (*p == ' ') 641 p++; 642 643 while (*p) { 644 /* Note the last non-whitespace index */ 645 if (!isspace(*p)) 646 s = q; 647 648 *q++ = *p++; 649 } 650 651 *(s + 1) = '\0'; 652 } 653 654 void detect_num_cpu_cores(struct cpuinfo_x86 *c) 655 { 656 unsigned int eax, ebx, ecx, edx; 657 658 c->x86_max_cores = 1; 659 if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4) 660 return; 661 662 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); 663 if (eax & 0x1f) 664 c->x86_max_cores = (eax >> 26) + 1; 665 } 666 667 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c) 668 { 669 unsigned int n, dummy, ebx, ecx, edx, l2size; 670 671 n = c->extended_cpuid_level; 672 673 if (n >= 0x80000005) { 674 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); 675 c->x86_cache_size = (ecx>>24) + (edx>>24); 676 #ifdef CONFIG_X86_64 677 /* On K8 L1 TLB is inclusive, so don't count it */ 678 c->x86_tlbsize = 0; 679 #endif 680 } 681 682 if (n < 0x80000006) /* Some chips just has a large L1. */ 683 return; 684 685 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); 686 l2size = ecx >> 16; 687 688 #ifdef CONFIG_X86_64 689 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); 690 #else 691 /* do processor-specific cache resizing */ 692 if (this_cpu->legacy_cache_size) 693 l2size = this_cpu->legacy_cache_size(c, l2size); 694 695 /* Allow user to override all this if necessary. 
*/ 696 if (cachesize_override != -1) 697 l2size = cachesize_override; 698 699 if (l2size == 0) 700 return; /* Again, no L2 cache is possible */ 701 #endif 702 703 c->x86_cache_size = l2size; 704 } 705 706 u16 __read_mostly tlb_lli_4k[NR_INFO]; 707 u16 __read_mostly tlb_lli_2m[NR_INFO]; 708 u16 __read_mostly tlb_lli_4m[NR_INFO]; 709 u16 __read_mostly tlb_lld_4k[NR_INFO]; 710 u16 __read_mostly tlb_lld_2m[NR_INFO]; 711 u16 __read_mostly tlb_lld_4m[NR_INFO]; 712 u16 __read_mostly tlb_lld_1g[NR_INFO]; 713 714 static void cpu_detect_tlb(struct cpuinfo_x86 *c) 715 { 716 if (this_cpu->c_detect_tlb) 717 this_cpu->c_detect_tlb(c); 718 719 pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n", 720 tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], 721 tlb_lli_4m[ENTRIES]); 722 723 pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n", 724 tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES], 725 tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]); 726 } 727 728 int detect_ht_early(struct cpuinfo_x86 *c) 729 { 730 #ifdef CONFIG_SMP 731 u32 eax, ebx, ecx, edx; 732 733 if (!cpu_has(c, X86_FEATURE_HT)) 734 return -1; 735 736 if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) 737 return -1; 738 739 if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) 740 return -1; 741 742 cpuid(1, &eax, &ebx, &ecx, &edx); 743 744 smp_num_siblings = (ebx & 0xff0000) >> 16; 745 if (smp_num_siblings == 1) 746 pr_info_once("CPU0: Hyper-Threading is disabled\n"); 747 #endif 748 return 0; 749 } 750 751 void detect_ht(struct cpuinfo_x86 *c) 752 { 753 #ifdef CONFIG_SMP 754 int index_msb, core_bits; 755 756 if (detect_ht_early(c) < 0) 757 return; 758 759 index_msb = get_count_order(smp_num_siblings); 760 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); 761 762 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 763 764 index_msb = get_count_order(smp_num_siblings); 765 766 core_bits = get_count_order(c->x86_max_cores); 767 768 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & 769 ((1 << core_bits) - 1); 770 #endif 771 } 772 773 static void get_cpu_vendor(struct cpuinfo_x86 *c) 774 { 775 char *v = c->x86_vendor_id; 776 int i; 777 778 for (i = 0; i < X86_VENDOR_NUM; i++) { 779 if (!cpu_devs[i]) 780 break; 781 782 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || 783 (cpu_devs[i]->c_ident[1] && 784 !strcmp(v, cpu_devs[i]->c_ident[1]))) { 785 786 this_cpu = cpu_devs[i]; 787 c->x86_vendor = this_cpu->c_x86_vendor; 788 return; 789 } 790 } 791 792 pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \ 793 "CPU: Your system may be unstable.\n", v); 794 795 c->x86_vendor = X86_VENDOR_UNKNOWN; 796 this_cpu = &default_cpu; 797 } 798 799 void cpu_detect(struct cpuinfo_x86 *c) 800 { 801 /* Get vendor name */ 802 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, 803 (unsigned int *)&c->x86_vendor_id[0], 804 (unsigned int *)&c->x86_vendor_id[8], 805 (unsigned int *)&c->x86_vendor_id[4]); 806 807 c->x86 = 4; 808 /* Intel-defined flags: level 0x00000001 */ 809 if (c->cpuid_level >= 0x00000001) { 810 u32 junk, tfms, cap0, misc; 811 812 cpuid(0x00000001, &tfms, &misc, &junk, &cap0); 813 c->x86 = x86_family(tfms); 814 c->x86_model = x86_model(tfms); 815 c->x86_stepping = x86_stepping(tfms); 816 817 if (cap0 & (1<<19)) { 818 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; 819 c->x86_cache_alignment = c->x86_clflush_size; 820 } 821 } 822 } 823 824 static void apply_forced_caps(struct cpuinfo_x86 *c) 825 { 826 int i; 827 828 for (i = 0; i < NCAPINTS + NBUGINTS; i++) { 829 c->x86_capability[i] &= ~cpu_caps_cleared[i]; 830 c->x86_capability[i] 
|= cpu_caps_set[i]; 831 } 832 } 833 834 static void init_speculation_control(struct cpuinfo_x86 *c) 835 { 836 /* 837 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, 838 * and they also have a different bit for STIBP support. Also, 839 * a hypervisor might have set the individual AMD bits even on 840 * Intel CPUs, for finer-grained selection of what's available. 841 */ 842 if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { 843 set_cpu_cap(c, X86_FEATURE_IBRS); 844 set_cpu_cap(c, X86_FEATURE_IBPB); 845 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); 846 } 847 848 if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) 849 set_cpu_cap(c, X86_FEATURE_STIBP); 850 851 if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) || 852 cpu_has(c, X86_FEATURE_VIRT_SSBD)) 853 set_cpu_cap(c, X86_FEATURE_SSBD); 854 855 if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { 856 set_cpu_cap(c, X86_FEATURE_IBRS); 857 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); 858 } 859 860 if (cpu_has(c, X86_FEATURE_AMD_IBPB)) 861 set_cpu_cap(c, X86_FEATURE_IBPB); 862 863 if (cpu_has(c, X86_FEATURE_AMD_STIBP)) { 864 set_cpu_cap(c, X86_FEATURE_STIBP); 865 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); 866 } 867 868 if (cpu_has(c, X86_FEATURE_AMD_SSBD)) { 869 set_cpu_cap(c, X86_FEATURE_SSBD); 870 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); 871 clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD); 872 } 873 } 874 875 static void init_cqm(struct cpuinfo_x86 *c) 876 { 877 if (!cpu_has(c, X86_FEATURE_CQM_LLC)) { 878 c->x86_cache_max_rmid = -1; 879 c->x86_cache_occ_scale = -1; 880 return; 881 } 882 883 /* will be overridden if occupancy monitoring exists */ 884 c->x86_cache_max_rmid = cpuid_ebx(0xf); 885 886 if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) || 887 cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) || 888 cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) { 889 u32 eax, ebx, ecx, edx; 890 891 /* QoS sub-leaf, EAX=0Fh, ECX=1 */ 892 cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx); 893 894 c->x86_cache_max_rmid = ecx; 895 c->x86_cache_occ_scale = ebx; 896 } 897 } 898 899 void get_cpu_cap(struct cpuinfo_x86 *c) 900 { 901 u32 eax, ebx, ecx, edx; 902 903 /* Intel-defined flags: level 0x00000001 */ 904 if (c->cpuid_level >= 0x00000001) { 905 cpuid(0x00000001, &eax, &ebx, &ecx, &edx); 906 907 c->x86_capability[CPUID_1_ECX] = ecx; 908 c->x86_capability[CPUID_1_EDX] = edx; 909 } 910 911 /* Thermal and Power Management Leaf: level 0x00000006 (eax) */ 912 if (c->cpuid_level >= 0x00000006) 913 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); 914 915 /* Additional Intel-defined flags: level 0x00000007 */ 916 if (c->cpuid_level >= 0x00000007) { 917 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); 918 c->x86_capability[CPUID_7_0_EBX] = ebx; 919 c->x86_capability[CPUID_7_ECX] = ecx; 920 c->x86_capability[CPUID_7_EDX] = edx; 921 922 /* Check valid sub-leaf index before accessing it */ 923 if (eax >= 1) { 924 cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx); 925 c->x86_capability[CPUID_7_1_EAX] = eax; 926 } 927 } 928 929 /* Extended state features: level 0x0000000d */ 930 if (c->cpuid_level >= 0x0000000d) { 931 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); 932 933 c->x86_capability[CPUID_D_1_EAX] = eax; 934 } 935 936 /* AMD-defined flags: level 0x80000001 */ 937 eax = cpuid_eax(0x80000000); 938 c->extended_cpuid_level = eax; 939 940 if ((eax & 0xffff0000) == 0x80000000) { 941 if (eax >= 0x80000001) { 942 cpuid(0x80000001, &eax, &ebx, &ecx, &edx); 943 944 c->x86_capability[CPUID_8000_0001_ECX] = ecx; 945 c->x86_capability[CPUID_8000_0001_EDX] = edx; 946 } 947 } 948 949 if (c->extended_cpuid_level >= 0x80000007) { 
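		/*
		 * Leaf 0x80000007 is Advanced Power Management: EDX carries
		 * the invariant-TSC bit, EBX the RAS capability bits.
		 */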
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);
	init_cqm(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif
	c->x86_cache_bits = c->x86_phys_bits;
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

#define NO_SPECULATION		BIT(0)
#define NO_MELTDOWN		BIT(1)
#define NO_SSB			BIT(2)
#define NO_L1TF			BIT(3)
#define NO_MDS			BIT(4)
#define MSBDS_ONLY		BIT(5)
#define NO_SWAPGS		BIT(6)

#define VULNWL(_vendor, _family, _model, _whitelist)	\
	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }

#define VULNWL_INTEL(model, whitelist)		\
	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)

#define VULNWL_AMD(family, whitelist)		\
	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)

#define VULNWL_HYGON(family, whitelist)		\
	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)

static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),

	/* Intel Family 6 */
	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION),
	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION),
	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION),
	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),

	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),

	VULNWL_INTEL(CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS),

	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS),
	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS),
	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS),

	/*
	 * Technically, swapgs isn't serializing on AMD (despite it previously
	 * being documented as such in the APM).  But according to AMD, %gs is
	 * updated non-speculatively, and the issuing of %gs-relative memory
	 * operands will be blocked until the %gs update completes, which is
	 * good enough for our purposes.
	 */

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
	{}
};

static bool __init cpu_matches(unsigned long which)
{
	const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);

	return m && !!(m->driver_data & which);
}

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	if (cpu_matches(NO_SPECULATION))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (ia32_cap & ARCH_CAP_IBRS_ALL)
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

	if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
		setup_force_cpu_bug(X86_BUG_MDS);
		if (cpu_matches(MSBDS_ONLY))
			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
	}

	if (!cpu_matches(NO_SWAPGS))
		setup_force_cpu_bug(X86_BUG_SWAPGS);

	if (cpu_matches(NO_MELTDOWN))
		return;

	/* Rogue Data Cache Load? No! */
	if (ia32_cap & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

	if (cpu_matches(NO_L1TF))
		return;

	setup_force_cpu_bug(X86_BUG_L1TF);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(void)
{
#ifdef CONFIG_X86_32
	setup_clear_cpu_cap(X86_FEATURE_NOPL);
#else
	setup_force_cpu_cap(X86_FEATURE_NOPL);
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		get_cpu_cap(c);
		get_cpu_address_sizes(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);

		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);

	fpu__init_system(c);

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

	/*
	 * Later in the boot process pgtable_l5_enabled() relies on
	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
	 * enabled by this point we need to clear the feature bit to avoid
	 * false-positives at the later stage.
	 *
	 * pgtable_l5_enabled() can be false here for several reasons:
	 *  - 5-level paging is disabled compile-time;
	 *  - it's 32-bit kernel;
	 *  - machine doesn't support 5-level paging;
	 *  - user specified 'no5lvl' in kernel command line.
	 */
	if (!pgtable_l5_enabled())
		setup_clear_cpu_cap(X86_FEATURE_LA57);

	detect_nopl();
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info("  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hardcoding the choice by
	 * vendor.
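	 *
	 * The probe below saves the old FS base, writes a non-zero base,
	 * loads a NULL selector into FS and reads the base back: if the
	 * non-zero value survived, the CPU has the (AMD) behavior and
	 * X86_BUG_NULL_SEG is set.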
1283 * 1284 * I've designated AMD's behavior as the "bug" because it's 1285 * counterintuitive and less friendly. 1286 */ 1287 1288 unsigned long old_base, tmp; 1289 rdmsrl(MSR_FS_BASE, old_base); 1290 wrmsrl(MSR_FS_BASE, 1); 1291 loadsegment(fs, 0); 1292 rdmsrl(MSR_FS_BASE, tmp); 1293 if (tmp != 0) 1294 set_cpu_bug(c, X86_BUG_NULL_SEG); 1295 wrmsrl(MSR_FS_BASE, old_base); 1296 #endif 1297 } 1298 1299 static void generic_identify(struct cpuinfo_x86 *c) 1300 { 1301 c->extended_cpuid_level = 0; 1302 1303 if (!have_cpuid_p()) 1304 identify_cpu_without_cpuid(c); 1305 1306 /* cyrix could have cpuid enabled via c_identify()*/ 1307 if (!have_cpuid_p()) 1308 return; 1309 1310 cpu_detect(c); 1311 1312 get_cpu_vendor(c); 1313 1314 get_cpu_cap(c); 1315 1316 get_cpu_address_sizes(c); 1317 1318 if (c->cpuid_level >= 0x00000001) { 1319 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; 1320 #ifdef CONFIG_X86_32 1321 # ifdef CONFIG_SMP 1322 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); 1323 # else 1324 c->apicid = c->initial_apicid; 1325 # endif 1326 #endif 1327 c->phys_proc_id = c->initial_apicid; 1328 } 1329 1330 get_model_name(c); /* Default name */ 1331 1332 detect_null_seg_behavior(c); 1333 1334 /* 1335 * ESPFIX is a strange bug. All real CPUs have it. Paravirt 1336 * systems that run Linux at CPL > 0 may or may not have the 1337 * issue, but, even if they have the issue, there's absolutely 1338 * nothing we can do about it because we can't use the real IRET 1339 * instruction. 1340 * 1341 * NB: For the time being, only 32-bit kernels support 1342 * X86_BUG_ESPFIX as such. 64-bit kernels directly choose 1343 * whether to apply espfix using paravirt hooks. If any 1344 * non-paravirt system ever shows up that does *not* have the 1345 * ESPFIX issue, we can change this. 1346 */ 1347 #ifdef CONFIG_X86_32 1348 # ifdef CONFIG_PARAVIRT_XXL 1349 do { 1350 extern void native_iret(void); 1351 if (pv_ops.cpu.iret == native_iret) 1352 set_cpu_bug(c, X86_BUG_ESPFIX); 1353 } while (0); 1354 # else 1355 set_cpu_bug(c, X86_BUG_ESPFIX); 1356 # endif 1357 #endif 1358 } 1359 1360 static void x86_init_cache_qos(struct cpuinfo_x86 *c) 1361 { 1362 /* 1363 * The heavy lifting of max_rmid and cache_occ_scale are handled 1364 * in get_cpu_cap(). Here we just set the max_rmid for the boot_cpu 1365 * in case CQM bits really aren't there in this CPU. 1366 */ 1367 if (c != &boot_cpu_data) { 1368 boot_cpu_data.x86_cache_max_rmid = 1369 min(boot_cpu_data.x86_cache_max_rmid, 1370 c->x86_cache_max_rmid); 1371 } 1372 } 1373 1374 /* 1375 * Validate that ACPI/mptables have the same information about the 1376 * effective APIC id and update the package map. 1377 */ 1378 static void validate_apic_and_package_id(struct cpuinfo_x86 *c) 1379 { 1380 #ifdef CONFIG_SMP 1381 unsigned int apicid, cpu = smp_processor_id(); 1382 1383 apicid = apic->cpu_present_to_apicid(cpu); 1384 1385 if (apicid != c->apicid) { 1386 pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n", 1387 cpu, apicid, c->initial_apicid); 1388 } 1389 BUG_ON(topology_update_package_map(c->phys_proc_id, cpu)); 1390 BUG_ON(topology_update_die_map(c->cpu_die_id, cpu)); 1391 #else 1392 c->logical_proc_id = 0; 1393 #endif 1394 } 1395 1396 /* 1397 * This does the hard work of actually picking apart the CPU stuff... 1398 */ 1399 static void identify_cpu(struct cpuinfo_x86 *c) 1400 { 1401 int i; 1402 1403 c->loops_per_jiffy = loops_per_jiffy; 1404 c->x86_cache_size = 0; 1405 c->x86_vendor = X86_VENDOR_UNKNOWN; 1406 c->x86_model = c->x86_stepping = 0; /* So far unknown... 
*/ 1407 c->x86_vendor_id[0] = '\0'; /* Unset */ 1408 c->x86_model_id[0] = '\0'; /* Unset */ 1409 c->x86_max_cores = 1; 1410 c->x86_coreid_bits = 0; 1411 c->cu_id = 0xff; 1412 #ifdef CONFIG_X86_64 1413 c->x86_clflush_size = 64; 1414 c->x86_phys_bits = 36; 1415 c->x86_virt_bits = 48; 1416 #else 1417 c->cpuid_level = -1; /* CPUID not detected */ 1418 c->x86_clflush_size = 32; 1419 c->x86_phys_bits = 32; 1420 c->x86_virt_bits = 32; 1421 #endif 1422 c->x86_cache_alignment = c->x86_clflush_size; 1423 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); 1424 1425 generic_identify(c); 1426 1427 if (this_cpu->c_identify) 1428 this_cpu->c_identify(c); 1429 1430 /* Clear/Set all flags overridden by options, after probe */ 1431 apply_forced_caps(c); 1432 1433 #ifdef CONFIG_X86_64 1434 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); 1435 #endif 1436 1437 /* 1438 * Vendor-specific initialization. In this section we 1439 * canonicalize the feature flags, meaning if there are 1440 * features a certain CPU supports which CPUID doesn't 1441 * tell us, CPUID claiming incorrect flags, or other bugs, 1442 * we handle them here. 1443 * 1444 * At the end of this section, c->x86_capability better 1445 * indicate the features this CPU genuinely supports! 1446 */ 1447 if (this_cpu->c_init) 1448 this_cpu->c_init(c); 1449 1450 /* Disable the PN if appropriate */ 1451 squash_the_stupid_serial_number(c); 1452 1453 /* Set up SMEP/SMAP/UMIP */ 1454 setup_smep(c); 1455 setup_smap(c); 1456 setup_umip(c); 1457 1458 /* 1459 * The vendor-specific functions might have changed features. 1460 * Now we do "generic changes." 1461 */ 1462 1463 /* Filter out anything that depends on CPUID levels we don't have */ 1464 filter_cpuid_features(c, true); 1465 1466 /* If the model name is still unset, do table lookup. */ 1467 if (!c->x86_model_id[0]) { 1468 const char *p; 1469 p = table_lookup_model(c); 1470 if (p) 1471 strcpy(c->x86_model_id, p); 1472 else 1473 /* Last resort... */ 1474 sprintf(c->x86_model_id, "%02x/%02x", 1475 c->x86, c->x86_model); 1476 } 1477 1478 #ifdef CONFIG_X86_64 1479 detect_ht(c); 1480 #endif 1481 1482 x86_init_rdrand(c); 1483 x86_init_cache_qos(c); 1484 setup_pku(c); 1485 1486 /* 1487 * Clear/Set all flags overridden by options, need do it 1488 * before following smp all cpus cap AND. 1489 */ 1490 apply_forced_caps(c); 1491 1492 /* 1493 * On SMP, boot_cpu_data holds the common feature set between 1494 * all CPUs; so make sure that we indicate which features are 1495 * common between the CPUs. The first time this routine gets 1496 * executed, c == &boot_cpu_data. 1497 */ 1498 if (c != &boot_cpu_data) { 1499 /* AND the already accumulated flags with these */ 1500 for (i = 0; i < NCAPINTS; i++) 1501 boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; 1502 1503 /* OR, i.e. replicate the bug flags */ 1504 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++) 1505 c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; 1506 } 1507 1508 /* Init Machine Check Exception if available. 
*/ 1509 mcheck_cpu_init(c); 1510 1511 select_idle_routine(c); 1512 1513 #ifdef CONFIG_NUMA 1514 numa_add_cpu(smp_processor_id()); 1515 #endif 1516 } 1517 1518 /* 1519 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions 1520 * on 32-bit kernels: 1521 */ 1522 #ifdef CONFIG_X86_32 1523 void enable_sep_cpu(void) 1524 { 1525 struct tss_struct *tss; 1526 int cpu; 1527 1528 if (!boot_cpu_has(X86_FEATURE_SEP)) 1529 return; 1530 1531 cpu = get_cpu(); 1532 tss = &per_cpu(cpu_tss_rw, cpu); 1533 1534 /* 1535 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- 1536 * see the big comment in struct x86_hw_tss's definition. 1537 */ 1538 1539 tss->x86_tss.ss1 = __KERNEL_CS; 1540 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); 1541 wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); 1542 wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); 1543 1544 put_cpu(); 1545 } 1546 #endif 1547 1548 void __init identify_boot_cpu(void) 1549 { 1550 identify_cpu(&boot_cpu_data); 1551 #ifdef CONFIG_X86_32 1552 sysenter_setup(); 1553 enable_sep_cpu(); 1554 #endif 1555 cpu_detect_tlb(&boot_cpu_data); 1556 setup_cr_pinning(); 1557 } 1558 1559 void identify_secondary_cpu(struct cpuinfo_x86 *c) 1560 { 1561 BUG_ON(c == &boot_cpu_data); 1562 identify_cpu(c); 1563 #ifdef CONFIG_X86_32 1564 enable_sep_cpu(); 1565 #endif 1566 mtrr_ap_init(); 1567 validate_apic_and_package_id(c); 1568 x86_spec_ctrl_setup_ap(); 1569 } 1570 1571 static __init int setup_noclflush(char *arg) 1572 { 1573 setup_clear_cpu_cap(X86_FEATURE_CLFLUSH); 1574 setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT); 1575 return 1; 1576 } 1577 __setup("noclflush", setup_noclflush); 1578 1579 void print_cpu_info(struct cpuinfo_x86 *c) 1580 { 1581 const char *vendor = NULL; 1582 1583 if (c->x86_vendor < X86_VENDOR_NUM) { 1584 vendor = this_cpu->c_vendor; 1585 } else { 1586 if (c->cpuid_level >= 0) 1587 vendor = c->x86_vendor_id; 1588 } 1589 1590 if (vendor && !strstr(c->x86_model_id, vendor)) 1591 pr_cont("%s ", vendor); 1592 1593 if (c->x86_model_id[0]) 1594 pr_cont("%s", c->x86_model_id); 1595 else 1596 pr_cont("%d86", c->x86); 1597 1598 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); 1599 1600 if (c->x86_stepping || c->cpuid_level >= 0) 1601 pr_cont(", stepping: 0x%x)\n", c->x86_stepping); 1602 else 1603 pr_cont(")\n"); 1604 } 1605 1606 /* 1607 * clearcpuid= was already parsed in fpu__init_parse_early_param. 1608 * But we need to keep a dummy __setup around otherwise it would 1609 * show up as an environment variable for init. 1610 */ 1611 static __init int setup_clearcpuid(char *arg) 1612 { 1613 return 1; 1614 } 1615 __setup("clearcpuid=", setup_clearcpuid); 1616 1617 #ifdef CONFIG_X86_64 1618 DEFINE_PER_CPU_FIRST(struct fixed_percpu_data, 1619 fixed_percpu_data) __aligned(PAGE_SIZE) __visible; 1620 EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data); 1621 1622 /* 1623 * The following percpu variables are hot. Align current_task to 1624 * cacheline size such that they fall in the same cacheline. 
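 * (current is then fetched with a single %gs-relative load via
 * this_cpu_read_stable(current_task), which is why the placement of
 * these hot variables matters.)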
1625 */ 1626 DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = 1627 &init_task; 1628 EXPORT_PER_CPU_SYMBOL(current_task); 1629 1630 DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); 1631 DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; 1632 1633 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; 1634 EXPORT_PER_CPU_SYMBOL(__preempt_count); 1635 1636 /* May not be marked __init: used by software suspend */ 1637 void syscall_init(void) 1638 { 1639 wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); 1640 wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); 1641 1642 #ifdef CONFIG_IA32_EMULATION 1643 wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); 1644 /* 1645 * This only works on Intel CPUs. 1646 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. 1647 * This does not cause SYSENTER to jump to the wrong location, because 1648 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). 1649 */ 1650 wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); 1651 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 1652 (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1)); 1653 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); 1654 #else 1655 wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); 1656 wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); 1657 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); 1658 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); 1659 #endif 1660 1661 /* Flags to clear on syscall */ 1662 wrmsrl(MSR_SYSCALL_MASK, 1663 X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| 1664 X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT); 1665 } 1666 1667 DEFINE_PER_CPU(int, debug_stack_usage); 1668 DEFINE_PER_CPU(u32, debug_idt_ctr); 1669 1670 void debug_stack_set_zero(void) 1671 { 1672 this_cpu_inc(debug_idt_ctr); 1673 load_current_idt(); 1674 } 1675 NOKPROBE_SYMBOL(debug_stack_set_zero); 1676 1677 void debug_stack_reset(void) 1678 { 1679 if (WARN_ON(!this_cpu_read(debug_idt_ctr))) 1680 return; 1681 if (this_cpu_dec_return(debug_idt_ctr) == 0) 1682 load_current_idt(); 1683 } 1684 NOKPROBE_SYMBOL(debug_stack_reset); 1685 1686 #else /* CONFIG_X86_64 */ 1687 1688 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; 1689 EXPORT_PER_CPU_SYMBOL(current_task); 1690 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; 1691 EXPORT_PER_CPU_SYMBOL(__preempt_count); 1692 1693 /* 1694 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find 1695 * the top of the kernel stack. Use an extra percpu variable to track the 1696 * top of the kernel stack directly. 1697 */ 1698 DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = 1699 (unsigned long)&init_thread_union + THREAD_SIZE; 1700 EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); 1701 1702 #ifdef CONFIG_STACKPROTECTOR 1703 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); 1704 #endif 1705 1706 #endif /* CONFIG_X86_64 */ 1707 1708 /* 1709 * Clear all 6 debug registers: 1710 */ 1711 static void clear_all_debug_regs(void) 1712 { 1713 int i; 1714 1715 for (i = 0; i < 8; i++) { 1716 /* Ignore db4, db5 */ 1717 if ((i == 4) || (i == 5)) 1718 continue; 1719 1720 set_debugreg(0, i); 1721 } 1722 } 1723 1724 #ifdef CONFIG_KGDB 1725 /* 1726 * Restore debug regs if using kgdbwait and you have a kernel debugger 1727 * connection established. 1728 */ 1729 static void dbg_restore_debug_regs(void) 1730 { 1731 if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) 1732 arch_kgdb_ops.correct_hw_break(); 1733 } 1734 #else /* ! 
CONFIG_KGDB */ 1735 #define dbg_restore_debug_regs() 1736 #endif /* ! CONFIG_KGDB */ 1737 1738 static void wait_for_master_cpu(int cpu) 1739 { 1740 #ifdef CONFIG_SMP 1741 /* 1742 * wait for ACK from master CPU before continuing 1743 * with AP initialization 1744 */ 1745 WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)); 1746 while (!cpumask_test_cpu(cpu, cpu_callout_mask)) 1747 cpu_relax(); 1748 #endif 1749 } 1750 1751 #ifdef CONFIG_X86_64 1752 static void setup_getcpu(int cpu) 1753 { 1754 unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); 1755 struct desc_struct d = { }; 1756 1757 if (boot_cpu_has(X86_FEATURE_RDTSCP)) 1758 write_rdtscp_aux(cpudata); 1759 1760 /* Store CPU and node number in limit. */ 1761 d.limit0 = cpudata; 1762 d.limit1 = cpudata >> 16; 1763 1764 d.type = 5; /* RO data, expand down, accessed */ 1765 d.dpl = 3; /* Visible to user code */ 1766 d.s = 1; /* Not a system segment */ 1767 d.p = 1; /* Present */ 1768 d.d = 1; /* 32-bit */ 1769 1770 write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S); 1771 } 1772 #endif 1773 1774 /* 1775 * cpu_init() initializes state that is per-CPU. Some data is already 1776 * initialized (naturally) in the bootstrap process, such as the GDT 1777 * and IDT. We reload them nevertheless, this function acts as a 1778 * 'CPU state barrier', nothing should get across. 1779 */ 1780 #ifdef CONFIG_X86_64 1781 1782 void cpu_init(void) 1783 { 1784 int cpu = raw_smp_processor_id(); 1785 struct task_struct *me; 1786 struct tss_struct *t; 1787 int i; 1788 1789 wait_for_master_cpu(cpu); 1790 1791 if (cpu) 1792 load_ucode_ap(); 1793 1794 t = &per_cpu(cpu_tss_rw, cpu); 1795 1796 #ifdef CONFIG_NUMA 1797 if (this_cpu_read(numa_node) == 0 && 1798 early_cpu_to_node(cpu) != NUMA_NO_NODE) 1799 set_numa_node(early_cpu_to_node(cpu)); 1800 #endif 1801 setup_getcpu(cpu); 1802 1803 me = current; 1804 1805 pr_debug("Initializing CPU#%d\n", cpu); 1806 1807 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1808 1809 /* 1810 * Initialize the per-CPU GDT with the boot GDT, 1811 * and set up the GDT descriptor: 1812 */ 1813 1814 switch_to_new_gdt(cpu); 1815 loadsegment(fs, 0); 1816 1817 load_current_idt(); 1818 1819 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); 1820 syscall_init(); 1821 1822 wrmsrl(MSR_FS_BASE, 0); 1823 wrmsrl(MSR_KERNEL_GS_BASE, 0); 1824 barrier(); 1825 1826 x86_configure_nx(); 1827 x2apic_setup(); 1828 1829 /* 1830 * set up and load the per-CPU TSS 1831 */ 1832 if (!t->x86_tss.ist[0]) { 1833 t->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF); 1834 t->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI); 1835 t->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB); 1836 t->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE); 1837 } 1838 1839 t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; 1840 1841 /* 1842 * <= is required because the CPU will access up to 1843 * 8 bits beyond the end of the IO permission bitmap. 1844 */ 1845 for (i = 0; i <= IO_BITMAP_LONGS; i++) 1846 t->io_bitmap[i] = ~0UL; 1847 1848 mmgrab(&init_mm); 1849 me->active_mm = &init_mm; 1850 BUG_ON(me->mm); 1851 initialize_tlbstate_and_flush(); 1852 enter_lazy_tlb(&init_mm, me); 1853 1854 /* 1855 * Initialize the TSS. sp0 points to the entry trampoline stack 1856 * regardless of what task is running. 
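	 * (cpu_entry_stack(cpu) + 1 is one past the end of the entry stack,
	 * i.e. its top, since the stack grows down.)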
1857 */ 1858 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); 1859 load_TR_desc(); 1860 load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); 1861 1862 load_mm_ldt(&init_mm); 1863 1864 clear_all_debug_regs(); 1865 dbg_restore_debug_regs(); 1866 1867 fpu__init_cpu(); 1868 1869 if (is_uv_system()) 1870 uv_cpu_init(); 1871 1872 load_fixmap_gdt(cpu); 1873 } 1874 1875 #else 1876 1877 void cpu_init(void) 1878 { 1879 int cpu = smp_processor_id(); 1880 struct task_struct *curr = current; 1881 struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu); 1882 1883 wait_for_master_cpu(cpu); 1884 1885 show_ucode_info_early(); 1886 1887 pr_info("Initializing CPU#%d\n", cpu); 1888 1889 if (cpu_feature_enabled(X86_FEATURE_VME) || 1890 boot_cpu_has(X86_FEATURE_TSC) || 1891 boot_cpu_has(X86_FEATURE_DE)) 1892 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1893 1894 load_current_idt(); 1895 switch_to_new_gdt(cpu); 1896 1897 /* 1898 * Set up and load the per-CPU TSS and LDT 1899 */ 1900 mmgrab(&init_mm); 1901 curr->active_mm = &init_mm; 1902 BUG_ON(curr->mm); 1903 initialize_tlbstate_and_flush(); 1904 enter_lazy_tlb(&init_mm, curr); 1905 1906 /* 1907 * Initialize the TSS. sp0 points to the entry trampoline stack 1908 * regardless of what task is running. 1909 */ 1910 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); 1911 load_TR_desc(); 1912 load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); 1913 1914 load_mm_ldt(&init_mm); 1915 1916 t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; 1917 1918 #ifdef CONFIG_DOUBLEFAULT 1919 /* Set up doublefault TSS pointer in the GDT */ 1920 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); 1921 #endif 1922 1923 clear_all_debug_regs(); 1924 dbg_restore_debug_regs(); 1925 1926 fpu__init_cpu(); 1927 1928 load_fixmap_gdt(cpu); 1929 } 1930 #endif 1931 1932 /* 1933 * The microcode loader calls this upon late microcode load to recheck features, 1934 * only when microcode has been updated. Caller holds microcode_mutex and CPU 1935 * hotplug lock. 1936 */ 1937 void microcode_check(void) 1938 { 1939 struct cpuinfo_x86 info; 1940 1941 perf_check_microcode(); 1942 1943 /* Reload CPUID max function as it might've changed. */ 1944 info.cpuid_level = cpuid_eax(0); 1945 1946 /* 1947 * Copy all capability leafs to pick up the synthetic ones so that 1948 * memcmp() below doesn't fail on that. The ones coming from CPUID will 1949 * get overwritten in get_cpu_cap(). 1950 */ 1951 memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); 1952 1953 get_cpu_cap(&info); 1954 1955 if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) 1956 return; 1957 1958 pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); 1959 pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); 1960 } 1961 1962 /* 1963 * Invoked from core CPU hotplug code after hotplug operations 1964 */ 1965 void arch_smt_update(void) 1966 { 1967 /* Handle the speculative execution misfeatures */ 1968 cpu_bugs_smt_update(); 1969 /* Check whether IPI broadcasting can be enabled */ 1970 apic_smt_update(); 1971 } 1972