1 #include <linux/bootmem.h>
2 #include <linux/linkage.h>
3 #include <linux/bitops.h>
4 #include <linux/kernel.h>
5 #include <linux/export.h>
6 #include <linux/percpu.h>
7 #include <linux/string.h>
8 #include <linux/ctype.h>
9 #include <linux/delay.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/clock.h>
12 #include <linux/sched/task.h>
13 #include <linux/init.h>
14 #include <linux/kprobes.h>
15 #include <linux/kgdb.h>
16 #include <linux/smp.h>
17 #include <linux/io.h>
18 #include <linux/syscore_ops.h>
19 
20 #include <asm/stackprotector.h>
21 #include <asm/perf_event.h>
22 #include <asm/mmu_context.h>
23 #include <asm/archrandom.h>
24 #include <asm/hypervisor.h>
25 #include <asm/processor.h>
26 #include <asm/tlbflush.h>
27 #include <asm/debugreg.h>
28 #include <asm/sections.h>
29 #include <asm/vsyscall.h>
30 #include <linux/topology.h>
31 #include <linux/cpumask.h>
32 #include <asm/pgtable.h>
33 #include <linux/atomic.h>
34 #include <asm/proto.h>
35 #include <asm/setup.h>
36 #include <asm/apic.h>
37 #include <asm/desc.h>
38 #include <asm/fpu/internal.h>
39 #include <asm/mtrr.h>
40 #include <asm/hwcap2.h>
41 #include <linux/numa.h>
42 #include <asm/asm.h>
43 #include <asm/bugs.h>
44 #include <asm/cpu.h>
45 #include <asm/mce.h>
46 #include <asm/msr.h>
47 #include <asm/pat.h>
48 #include <asm/microcode.h>
49 #include <asm/microcode_intel.h>
50 
51 #ifdef CONFIG_X86_LOCAL_APIC
52 #include <asm/uv/uv.h>
53 #endif
54 
55 #include "cpu.h"
56 
57 u32 elf_hwcap2 __read_mostly;
58 
59 /* all of these masks are initialized in setup_cpu_local_masks() */
60 cpumask_var_t cpu_initialized_mask;
61 cpumask_var_t cpu_callout_mask;
62 cpumask_var_t cpu_callin_mask;
63 
64 /* representing cpus for which sibling maps can be computed */
65 cpumask_var_t cpu_sibling_setup_mask;
66 
67 /* correctly size the local cpu masks */
68 void __init setup_cpu_local_masks(void)
69 {
70 	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
71 	alloc_bootmem_cpumask_var(&cpu_callin_mask);
72 	alloc_bootmem_cpumask_var(&cpu_callout_mask);
73 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
74 }
75 
76 static void default_init(struct cpuinfo_x86 *c)
77 {
78 #ifdef CONFIG_X86_64
79 	cpu_detect_cache_sizes(c);
80 #else
81 	/* Not much we can do here... */
82 	/* Check if at least it has cpuid */
83 	if (c->cpuid_level == -1) {
84 		/* No cpuid. It must be an ancient CPU */
85 		if (c->x86 == 4)
86 			strcpy(c->x86_model_id, "486");
87 		else if (c->x86 == 3)
88 			strcpy(c->x86_model_id, "386");
89 	}
90 #endif
91 }
92 
93 static const struct cpu_dev default_cpu = {
94 	.c_init		= default_init,
95 	.c_vendor	= "Unknown",
96 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
97 };
98 
99 static const struct cpu_dev *this_cpu = &default_cpu;
100 
101 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
102 #ifdef CONFIG_X86_64
103 	/*
104 	 * We need valid kernel segments for data and code in long mode too;
105 	 * IRET will check the segment types (kkeil 2000/10/28).
106 	 * Also, SYSRET mandates a special GDT layout.
107 	 *
108 	 * TLS descriptors are currently at a different place compared to i386.
109 	 * Hopefully nobody expects them at a fixed place (Wine?)
110 	 */
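	/*
	 * GDT_ENTRY_INIT(flags, base, limit): the low byte of 'flags' is the
	 * descriptor access byte (present bit, DPL, type), the top nibble
	 * holds the granularity/operand-size/long-mode bits, and base/limit
	 * fill the remaining fields.
	 */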
111 	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
112 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
113 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
114 	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
115 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
116 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
117 #else
118 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
119 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
120 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
121 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
122 	/*
123 	 * Segments used for calling PnP BIOS have byte granularity.
124 	 * The code and data segments have fixed 64k limits;
125 	 * the transfer segment sizes are set at run time.
126 	 */
127 	/* 32-bit code */
128 	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
129 	/* 16-bit code */
130 	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
131 	/* 16-bit data */
132 	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
133 	/* 16-bit data */
134 	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
135 	/* 16-bit data */
136 	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
137 	/*
138 	 * The APM segments have byte granularity and their bases
139 	 * are set at run time.  All have 64k limits.
140 	 */
141 	/* 32-bit code */
142 	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
143 	/* 16-bit code */
144 	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
145 	/* data */
146 	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
147 
148 	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
149 	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
150 	GDT_STACK_CANARY_INIT
151 #endif
152 } };
153 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
154 
155 static int __init x86_mpx_setup(char *s)
156 {
157 	/* require an exact match without trailing characters */
158 	if (strlen(s))
159 		return 0;
160 
161 	/* do not emit a message if the feature is not present */
162 	if (!boot_cpu_has(X86_FEATURE_MPX))
163 		return 1;
164 
165 	setup_clear_cpu_cap(X86_FEATURE_MPX);
166 	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
167 	return 1;
168 }
169 __setup("nompx", x86_mpx_setup);
170 
171 #ifdef CONFIG_X86_64
172 static int __init x86_nopcid_setup(char *s)
173 {
174 	/* nopcid doesn't accept parameters */
175 	if (s)
176 		return -EINVAL;
177 
178 	/* do not emit a message if the feature is not present */
179 	if (!boot_cpu_has(X86_FEATURE_PCID))
180 		return 0;
181 
182 	setup_clear_cpu_cap(X86_FEATURE_PCID);
183 	pr_info("nopcid: PCID feature disabled\n");
184 	return 0;
185 }
186 early_param("nopcid", x86_nopcid_setup);
187 #endif
188 
189 static int __init x86_noinvpcid_setup(char *s)
190 {
191 	/* noinvpcid doesn't accept parameters */
192 	if (s)
193 		return -EINVAL;
194 
195 	/* do not emit a message if the feature is not present */
196 	if (!boot_cpu_has(X86_FEATURE_INVPCID))
197 		return 0;
198 
199 	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
200 	pr_info("noinvpcid: INVPCID feature disabled\n");
201 	return 0;
202 }
203 early_param("noinvpcid", x86_noinvpcid_setup);
204 
205 #ifdef CONFIG_X86_32
206 static int cachesize_override = -1;
207 static int disable_x86_serial_nr = 1;
208 
209 static int __init cachesize_setup(char *str)
210 {
211 	get_option(&str, &cachesize_override);
212 	return 1;
213 }
214 __setup("cachesize=", cachesize_setup);
215 
216 static int __init x86_sep_setup(char *s)
217 {
218 	setup_clear_cpu_cap(X86_FEATURE_SEP);
219 	return 1;
220 }
221 __setup("nosep", x86_sep_setup);
222 
223 /* Standard macro to see if a specific flag is changeable */
224 static inline int flag_is_changeable_p(u32 flag)
225 {
226 	u32 f1, f2;
227 
228 	/*
229 	 * Cyrix and IDT CPUs allow the CPUID instruction to be disabled,
230 	 * so the code below may return different results when it is
231 	 * executed before and after CPUID is enabled. Mark the asm
232 	 * "volatile" so gcc does not optimize away subsequent calls
233 	 * to this function.
234 	 */
235 	asm volatile ("pushfl		\n\t"
236 		      "pushfl		\n\t"
237 		      "popl %0		\n\t"
238 		      "movl %0, %1	\n\t"
239 		      "xorl %2, %0	\n\t"
240 		      "pushl %0		\n\t"
241 		      "popfl		\n\t"
242 		      "pushfl		\n\t"
243 		      "popl %0		\n\t"
244 		      "popfl		\n\t"
245 
246 		      : "=&r" (f1), "=&r" (f2)
247 		      : "ir" (flag));
248 
249 	return ((f1^f2) & flag) != 0;
250 }
251 
252 /* Probe for the CPUID instruction */
253 int have_cpuid_p(void)
254 {
255 	return flag_is_changeable_p(X86_EFLAGS_ID);
256 }
257 
258 static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
259 {
260 	unsigned long lo, hi;
261 
262 	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
263 		return;
264 
265 	/* Disable processor serial number: */
266 
267 	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
268 	lo |= 0x200000;
269 	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
270 
271 	pr_notice("CPU serial number disabled.\n");
272 	clear_cpu_cap(c, X86_FEATURE_PN);
273 
274 	/* Disabling the serial number may affect the cpuid level */
275 	c->cpuid_level = cpuid_eax(0);
276 }
277 
278 static int __init x86_serial_nr_setup(char *s)
279 {
280 	disable_x86_serial_nr = 0;
281 	return 1;
282 }
283 __setup("serialnumber", x86_serial_nr_setup);
284 #else
285 static inline int flag_is_changeable_p(u32 flag)
286 {
287 	return 1;
288 }
289 static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
290 {
291 }
292 #endif
293 
294 static __init int setup_disable_smep(char *arg)
295 {
296 	setup_clear_cpu_cap(X86_FEATURE_SMEP);
297 	/* Check for things that depend on SMEP being enabled: */
298 	check_mpx_erratum(&boot_cpu_data);
299 	return 1;
300 }
301 __setup("nosmep", setup_disable_smep);
302 
303 static __always_inline void setup_smep(struct cpuinfo_x86 *c)
304 {
305 	if (cpu_has(c, X86_FEATURE_SMEP))
306 		cr4_set_bits(X86_CR4_SMEP);
307 }
308 
309 static __init int setup_disable_smap(char *arg)
310 {
311 	setup_clear_cpu_cap(X86_FEATURE_SMAP);
312 	return 1;
313 }
314 __setup("nosmap", setup_disable_smap);
315 
316 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
317 {
318 	unsigned long eflags = native_save_fl();
319 
320 	/* This should have been cleared long ago */
321 	BUG_ON(eflags & X86_EFLAGS_AC);
322 
323 	if (cpu_has(c, X86_FEATURE_SMAP)) {
324 #ifdef CONFIG_X86_SMAP
325 		cr4_set_bits(X86_CR4_SMAP);
326 #else
327 		cr4_clear_bits(X86_CR4_SMAP);
328 #endif
329 	}
330 }
331 
332 static __always_inline void setup_umip(struct cpuinfo_x86 *c)
333 {
334 	/* Check the boot processor, plus build option for UMIP. */
335 	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
336 		goto out;
337 
338 	/* Check the current processor's cpuid bits. */
339 	if (!cpu_has(c, X86_FEATURE_UMIP))
340 		goto out;
341 
342 	cr4_set_bits(X86_CR4_UMIP);
343 
344 	pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");
345 
346 	return;
347 
348 out:
349 	/*
350 	 * Make sure UMIP is disabled in case it was enabled in a
351 	 * previous boot (e.g., via kexec).
352 	 */
353 	cr4_clear_bits(X86_CR4_UMIP);
354 }
355 
356 /*
357  * Protection Keys are not available in 32-bit mode.
358  */
359 static bool pku_disabled;
360 
361 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
362 {
363 	/* check the boot processor, plus compile options for PKU: */
364 	if (!cpu_feature_enabled(X86_FEATURE_PKU))
365 		return;
366 	/* checks the actual processor's cpuid bits: */
367 	if (!cpu_has(c, X86_FEATURE_PKU))
368 		return;
369 	if (pku_disabled)
370 		return;
371 
372 	cr4_set_bits(X86_CR4_PKE);
373 	/*
374 	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
375 	 * cpuid bit to be set.  We need to ensure that we
376 	 * update that bit in this CPU's "cpu_info".
377 	 */
378 	get_cpu_cap(c);
379 }
380 
381 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
382 static __init int setup_disable_pku(char *arg)
383 {
384 	/*
385 	 * Do not clear the X86_FEATURE_PKU bit.  All of the
386 	 * runtime checks are against OSPKE so clearing the
387 	 * bit does nothing.
388 	 *
389 	 * This way, we will see "pku" in cpuinfo, but not
390 	 * "ospke", which is exactly what we want.  It shows
391 	 * that the CPU has PKU, but the OS has not enabled it.
392 	 * This happens to be exactly how a system would look
393 	 * if we disabled the config option.
394 	 */
395 	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
396 	pku_disabled = true;
397 	return 1;
398 }
399 __setup("nopku", setup_disable_pku);
400 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
401 
402 /*
403  * Some CPU features depend on higher CPUID levels, which may not always
404  * be available due to CPUID level capping or broken virtualization
405  * software.  Add those features to this table to auto-disable them.
406  */
407 struct cpuid_dependent_feature {
408 	u32 feature;
409 	u32 level;
410 };
411 
412 static const struct cpuid_dependent_feature
413 cpuid_dependent_features[] = {
414 	{ X86_FEATURE_MWAIT,		0x00000005 },
415 	{ X86_FEATURE_DCA,		0x00000009 },
416 	{ X86_FEATURE_XSAVE,		0x0000000d },
417 	{ 0, 0 }
418 };
419 
420 static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
421 {
422 	const struct cpuid_dependent_feature *df;
423 
424 	for (df = cpuid_dependent_features; df->feature; df++) {
425 
426 		if (!cpu_has(c, df->feature))
427 			continue;
428 		/*
429 		 * Note: cpuid_level is set to -1 if unavailable, but
430 		 * extended_cpuid_level is set to 0 if unavailable
431 		 * and the legitimate extended levels are all negative
432 		 * when signed; hence the weird messing around with
433 		 * signs here...
434 		 */
435 		if (!((s32)df->level < 0 ?
436 		     (u32)df->level > (u32)c->extended_cpuid_level :
437 		     (s32)df->level > (s32)c->cpuid_level))
438 			continue;
439 
440 		clear_cpu_cap(c, df->feature);
441 		if (!warn)
442 			continue;
443 
444 		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
445 			x86_cap_flag(df->feature), df->level);
446 	}
447 }
448 
449 /*
450  * Naming convention should be: <Name> [(<Codename>)]
451  * This table is only used if init_<vendor>() below doesn't set the
452  * model name; in particular, if CPUID levels 0x80000002..4 are
453  * supported, this table isn't used.
454  */
455 
456 /* Look up CPU names by table lookup. */
457 static const char *table_lookup_model(struct cpuinfo_x86 *c)
458 {
459 #ifdef CONFIG_X86_32
460 	const struct legacy_cpu_model_info *info;
461 
462 	if (c->x86_model >= 16)
463 		return NULL;	/* Range check */
464 
465 	if (!this_cpu)
466 		return NULL;
467 
468 	info = this_cpu->legacy_models;
469 
470 	while (info->family) {
471 		if (info->family == c->x86)
472 			return info->model_names[c->x86_model];
473 		info++;
474 	}
475 #endif
476 	return NULL;		/* Not found */
477 }
478 
479 __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
480 __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
481 
482 void load_percpu_segment(int cpu)
483 {
484 #ifdef CONFIG_X86_32
485 	loadsegment(fs, __KERNEL_PERCPU);
486 #else
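	/*
	 * On 64-bit the per-CPU area is reached via the GS base MSR, so
	 * clear %gs and point MSR_GS_BASE at this CPU's area.
	 */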
487 	__loadsegment_simple(gs, 0);
488 	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
489 #endif
490 	load_stack_canary_segment();
491 }
492 
493 #ifdef CONFIG_X86_32
494 /* The 32-bit entry code needs to find cpu_entry_area. */
495 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
496 #endif
497 
498 #ifdef CONFIG_X86_64
499 /*
500  * Special IST stacks which the CPU switches to when it calls
501  * an IST-marked descriptor entry. Up to 7 stacks (hardware
502  * limit), all of them are 4K, except the debug stack which
503  * is 8K.
504  */
505 static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
506 	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
507 	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
508 };
509 #endif
510 
511 /* Load the original GDT from the per-cpu structure */
512 void load_direct_gdt(int cpu)
513 {
514 	struct desc_ptr gdt_descr;
515 
516 	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
517 	gdt_descr.size = GDT_SIZE - 1;
518 	load_gdt(&gdt_descr);
519 }
520 EXPORT_SYMBOL_GPL(load_direct_gdt);
521 
522 /* Load a fixmap remapping of the per-cpu GDT */
523 void load_fixmap_gdt(int cpu)
524 {
525 	struct desc_ptr gdt_descr;
526 
527 	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
528 	gdt_descr.size = GDT_SIZE - 1;
529 	load_gdt(&gdt_descr);
530 }
531 EXPORT_SYMBOL_GPL(load_fixmap_gdt);
532 
533 /*
534  * The current GDT points %fs (%gs on 64-bit) at the initial "master"
535  * per-cpu area; after this, it points at this CPU's real per-cpu area.
536  */
537 void switch_to_new_gdt(int cpu)
538 {
539 	/* Load the original GDT */
540 	load_direct_gdt(cpu);
541 	/* Reload the per-cpu base */
542 	load_percpu_segment(cpu);
543 }
544 
545 static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
546 
547 static void get_model_name(struct cpuinfo_x86 *c)
548 {
549 	unsigned int *v;
550 	char *p, *q, *s;
551 
552 	if (c->extended_cpuid_level < 0x80000004)
553 		return;
554 
555 	v = (unsigned int *)c->x86_model_id;
556 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
557 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
558 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
559 	c->x86_model_id[48] = 0;
560 
561 	/* Trim whitespace */
562 	p = q = s = &c->x86_model_id[0];
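	/* p: read cursor, q: write cursor, s: last non-space byte written */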
563 
564 	while (*p == ' ')
565 		p++;
566 
567 	while (*p) {
568 		/* Note the last non-whitespace index */
569 		if (!isspace(*p))
570 			s = q;
571 
572 		*q++ = *p++;
573 	}
574 
575 	*(s + 1) = '\0';
576 }
577 
578 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
579 {
580 	unsigned int n, dummy, ebx, ecx, edx, l2size;
581 
582 	n = c->extended_cpuid_level;
583 
584 	if (n >= 0x80000005) {
585 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
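		/* ECX[31:24] = L1 data cache size, EDX[31:24] = L1 instruction cache size, both in KB */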
586 		c->x86_cache_size = (ecx>>24) + (edx>>24);
587 #ifdef CONFIG_X86_64
588 		/* On K8 L1 TLB is inclusive, so don't count it */
589 		c->x86_tlbsize = 0;
590 #endif
591 	}
592 
593 	if (n < 0x80000006)	/* Some chips just have a large L1. */
594 		return;
595 
596 	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
597 	l2size = ecx >> 16;
598 
599 #ifdef CONFIG_X86_64
600 	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
601 #else
602 	/* do processor-specific cache resizing */
603 	if (this_cpu->legacy_cache_size)
604 		l2size = this_cpu->legacy_cache_size(c, l2size);
605 
606 	/* Allow user to override all this if necessary. */
607 	if (cachesize_override != -1)
608 		l2size = cachesize_override;
609 
610 	if (l2size == 0)
611 		return;		/* Again, no L2 cache is possible */
612 #endif
613 
614 	c->x86_cache_size = l2size;
615 }
616 
617 u16 __read_mostly tlb_lli_4k[NR_INFO];
618 u16 __read_mostly tlb_lli_2m[NR_INFO];
619 u16 __read_mostly tlb_lli_4m[NR_INFO];
620 u16 __read_mostly tlb_lld_4k[NR_INFO];
621 u16 __read_mostly tlb_lld_2m[NR_INFO];
622 u16 __read_mostly tlb_lld_4m[NR_INFO];
623 u16 __read_mostly tlb_lld_1g[NR_INFO];
624 
625 static void cpu_detect_tlb(struct cpuinfo_x86 *c)
626 {
627 	if (this_cpu->c_detect_tlb)
628 		this_cpu->c_detect_tlb(c);
629 
630 	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
631 		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
632 		tlb_lli_4m[ENTRIES]);
633 
634 	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
635 		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
636 		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
637 }
638 
639 void detect_ht(struct cpuinfo_x86 *c)
640 {
641 #ifdef CONFIG_SMP
642 	u32 eax, ebx, ecx, edx;
643 	int index_msb, core_bits;
644 	static bool printed;
645 
646 	if (!cpu_has(c, X86_FEATURE_HT))
647 		return;
648 
649 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
650 		goto out;
651 
652 	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
653 		return;
654 
655 	cpuid(1, &eax, &ebx, &ecx, &edx);
656 
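	/* CPUID.01H:EBX[23:16] = maximum number of logical processors per package */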
657 	smp_num_siblings = (ebx & 0xff0000) >> 16;
658 
659 	if (smp_num_siblings == 1) {
660 		pr_info_once("CPU0: Hyper-Threading is disabled\n");
661 		goto out;
662 	}
663 
664 	if (smp_num_siblings <= 1)
665 		goto out;
666 
667 	index_msb = get_count_order(smp_num_siblings);
668 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
669 
670 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
671 
672 	index_msb = get_count_order(smp_num_siblings);
673 
674 	core_bits = get_count_order(c->x86_max_cores);
675 
676 	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
677 				       ((1 << core_bits) - 1);
678 
679 out:
680 	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
681 		pr_info("CPU: Physical Processor ID: %d\n",
682 			c->phys_proc_id);
683 		pr_info("CPU: Processor Core ID: %d\n",
684 			c->cpu_core_id);
685 		printed = 1;
686 	}
687 #endif
688 }
689 
690 static void get_cpu_vendor(struct cpuinfo_x86 *c)
691 {
692 	char *v = c->x86_vendor_id;
693 	int i;
694 
695 	for (i = 0; i < X86_VENDOR_NUM; i++) {
696 		if (!cpu_devs[i])
697 			break;
698 
699 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
700 		    (cpu_devs[i]->c_ident[1] &&
701 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
702 
703 			this_cpu = cpu_devs[i];
704 			c->x86_vendor = this_cpu->c_x86_vendor;
705 			return;
706 		}
707 	}
708 
709 	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
710 		    "CPU: Your system may be unstable.\n", v);
711 
712 	c->x86_vendor = X86_VENDOR_UNKNOWN;
713 	this_cpu = &default_cpu;
714 }
715 
716 void cpu_detect(struct cpuinfo_x86 *c)
717 {
718 	/* Get vendor name */
719 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
720 	      (unsigned int *)&c->x86_vendor_id[0],
721 	      (unsigned int *)&c->x86_vendor_id[8],
722 	      (unsigned int *)&c->x86_vendor_id[4]);
723 
724 	c->x86 = 4;
725 	/* Intel-defined flags: level 0x00000001 */
726 	if (c->cpuid_level >= 0x00000001) {
727 		u32 junk, tfms, cap0, misc;
728 
729 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
730 		c->x86		= x86_family(tfms);
731 		c->x86_model	= x86_model(tfms);
732 		c->x86_mask	= x86_stepping(tfms);
733 
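		/* CPUID.01H:EDX bit 19 = CLFLUSH; EBX[15:8] = CLFLUSH line size in 8-byte units */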
734 		if (cap0 & (1<<19)) {
735 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
736 			c->x86_cache_alignment = c->x86_clflush_size;
737 		}
738 	}
739 }
740 
741 static void apply_forced_caps(struct cpuinfo_x86 *c)
742 {
743 	int i;
744 
745 	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
746 		c->x86_capability[i] &= ~cpu_caps_cleared[i];
747 		c->x86_capability[i] |= cpu_caps_set[i];
748 	}
749 }
750 
751 void get_cpu_cap(struct cpuinfo_x86 *c)
752 {
753 	u32 eax, ebx, ecx, edx;
754 
755 	/* Intel-defined flags: level 0x00000001 */
756 	if (c->cpuid_level >= 0x00000001) {
757 		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
758 
759 		c->x86_capability[CPUID_1_ECX] = ecx;
760 		c->x86_capability[CPUID_1_EDX] = edx;
761 	}
762 
763 	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
764 	if (c->cpuid_level >= 0x00000006)
765 		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
766 
767 	/* Additional Intel-defined flags: level 0x00000007 */
768 	if (c->cpuid_level >= 0x00000007) {
769 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
770 		c->x86_capability[CPUID_7_0_EBX] = ebx;
771 		c->x86_capability[CPUID_7_ECX] = ecx;
772 	}
773 
774 	/* Extended state features: level 0x0000000d */
775 	if (c->cpuid_level >= 0x0000000d) {
776 		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
777 
778 		c->x86_capability[CPUID_D_1_EAX] = eax;
779 	}
780 
781 	/* Additional Intel-defined flags: level 0x0000000F */
782 	if (c->cpuid_level >= 0x0000000F) {
783 
784 		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
785 		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
786 		c->x86_capability[CPUID_F_0_EDX] = edx;
787 
788 		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
789 			/* will be overridden if occupancy monitoring exists */
790 			c->x86_cache_max_rmid = ebx;
791 
792 			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
793 			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
794 			c->x86_capability[CPUID_F_1_EDX] = edx;
795 
796 			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
797 			      ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
798 			       (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
799 				c->x86_cache_max_rmid = ecx;
800 				c->x86_cache_occ_scale = ebx;
801 			}
802 		} else {
803 			c->x86_cache_max_rmid = -1;
804 			c->x86_cache_occ_scale = -1;
805 		}
806 	}
807 
808 	/* AMD-defined flags: level 0x80000001 */
809 	eax = cpuid_eax(0x80000000);
810 	c->extended_cpuid_level = eax;
811 
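	/* Only trust the extended leaves if the maximum level looks sane (0x8000xxxx) */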
812 	if ((eax & 0xffff0000) == 0x80000000) {
813 		if (eax >= 0x80000001) {
814 			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
815 
816 			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
817 			c->x86_capability[CPUID_8000_0001_EDX] = edx;
818 		}
819 	}
820 
821 	if (c->extended_cpuid_level >= 0x80000007) {
822 		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
823 
824 		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
825 		c->x86_power = edx;
826 	}
827 
828 	if (c->extended_cpuid_level >= 0x80000008) {
829 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
830 
831 		c->x86_virt_bits = (eax >> 8) & 0xff;
832 		c->x86_phys_bits = eax & 0xff;
833 		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
834 	}
835 #ifdef CONFIG_X86_32
836 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
837 		c->x86_phys_bits = 36;
838 #endif
839 
840 	if (c->extended_cpuid_level >= 0x8000000a)
841 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
842 
843 	init_scattered_cpuid_features(c);
844 
845 	/*
846 	 * Clear/Set all flags overridden by options, after probe.
847 	 * This needs to happen each time we re-probe, which may happen
848 	 * several times during CPU initialization.
849 	 */
850 	apply_forced_caps(c);
851 }
852 
853 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
854 {
855 #ifdef CONFIG_X86_32
856 	int i;
857 
858 	/*
859 	 * First of all, decide if this is a 486 or higher
860 	 * First of all, decide if this is a 486 or higher:
861 	 * it's a 486 if we can modify the AC flag.
862 	if (flag_is_changeable_p(X86_EFLAGS_AC))
863 		c->x86 = 4;
864 	else
865 		c->x86 = 3;
866 
867 	for (i = 0; i < X86_VENDOR_NUM; i++)
868 		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
869 			c->x86_vendor_id[0] = 0;
870 			cpu_devs[i]->c_identify(c);
871 			if (c->x86_vendor_id[0]) {
872 				get_cpu_vendor(c);
873 				break;
874 			}
875 		}
876 #endif
877 }
878 
879 /*
880  * Do minimum CPU detection early.
881  * Fields really needed: vendor, cpuid_level, family, model, mask,
882  * cache alignment.
883  * The others are not touched to avoid unwanted side effects.
884  *
885  * WARNING: this function is only called on the boot CPU.  Don't add code
886  * here that is supposed to run on all CPUs.
887  */
888 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
889 {
890 #ifdef CONFIG_X86_64
891 	c->x86_clflush_size = 64;
892 	c->x86_phys_bits = 36;
893 	c->x86_virt_bits = 48;
894 #else
895 	c->x86_clflush_size = 32;
896 	c->x86_phys_bits = 32;
897 	c->x86_virt_bits = 32;
898 #endif
899 	c->x86_cache_alignment = c->x86_clflush_size;
900 
901 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
902 	c->extended_cpuid_level = 0;
903 
904 	/* Cyrix could have CPUID enabled via c_identify() */
905 	if (have_cpuid_p()) {
906 		cpu_detect(c);
907 		get_cpu_vendor(c);
908 		get_cpu_cap(c);
909 		setup_force_cpu_cap(X86_FEATURE_CPUID);
910 
911 		if (this_cpu->c_early_init)
912 			this_cpu->c_early_init(c);
913 
914 		c->cpu_index = 0;
915 		filter_cpuid_features(c, false);
916 
917 		if (this_cpu->c_bsp_init)
918 			this_cpu->c_bsp_init(c);
919 	} else {
920 		identify_cpu_without_cpuid(c);
921 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
922 	}
923 
924 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
925 
926 	/* Assume for now that ALL x86 CPUs are insecure */
927 	setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
928 
929 	fpu__init_system(c);
930 
931 #ifdef CONFIG_X86_32
932 	/*
933 	 * Regardless of whether PCID is enumerated, the SDM says
934 	 * that it can't be enabled in 32-bit mode.
935 	 */
936 	setup_clear_cpu_cap(X86_FEATURE_PCID);
937 #endif
938 }
939 
940 void __init early_cpu_init(void)
941 {
942 	const struct cpu_dev *const *cdev;
943 	int count = 0;
944 
945 #ifdef CONFIG_PROCESSOR_SELECT
946 	pr_info("KERNEL supported cpus:\n");
947 #endif
948 
949 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
950 		const struct cpu_dev *cpudev = *cdev;
951 
952 		if (count >= X86_VENDOR_NUM)
953 			break;
954 		cpu_devs[count] = cpudev;
955 		count++;
956 
957 #ifdef CONFIG_PROCESSOR_SELECT
958 		{
959 			unsigned int j;
960 
961 			for (j = 0; j < 2; j++) {
962 				if (!cpudev->c_ident[j])
963 					continue;
964 				pr_info("  %s %s\n", cpudev->c_vendor,
965 					cpudev->c_ident[j]);
966 			}
967 		}
968 #endif
969 	}
970 	early_identify_cpu(&boot_cpu_data);
971 }
972 
973 /*
974  * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
975  * unfortunately, that's not true in practice because of early VIA
976  * chips and (more importantly) broken virtualizers that are not easy
977  * to detect. In the latter case it doesn't even *fail* reliably, so
978  * probing for it doesn't even work. Disable it completely on 32-bit
979  * unless we can find a reliable way to detect all the broken cases.
980  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
981  */
982 static void detect_nopl(struct cpuinfo_x86 *c)
983 {
984 #ifdef CONFIG_X86_32
985 	clear_cpu_cap(c, X86_FEATURE_NOPL);
986 #else
987 	set_cpu_cap(c, X86_FEATURE_NOPL);
988 #endif
989 }
990 
991 static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
992 {
993 #ifdef CONFIG_X86_64
994 	/*
995 	 * Empirically, writing zero to a segment selector on AMD does
996 	 * not clear the base, whereas writing zero to a segment
997 	 * selector on Intel does clear the base.  Intel's behavior
998 	 * allows slightly faster context switches in the common case
999 	 * where GS is unused by the prev and next threads.
1000 	 *
1001 	 * Since neither vendor documents this anywhere that I can see,
1002 	 * detect it directly instead of hardcoding the choice by
1003 	 * vendor.
1004 	 *
1005 	 * I've designated AMD's behavior as the "bug" because it's
1006 	 * counterintuitive and less friendly.
1007 	 */
1008 
1009 	unsigned long old_base, tmp;
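	/*
	 * Write a known nonzero FS base, load a null selector into %fs and
	 * check whether the base survives (AMD) or is zeroed (Intel).
	 */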
1010 	rdmsrl(MSR_FS_BASE, old_base);
1011 	wrmsrl(MSR_FS_BASE, 1);
1012 	loadsegment(fs, 0);
1013 	rdmsrl(MSR_FS_BASE, tmp);
1014 	if (tmp != 0)
1015 		set_cpu_bug(c, X86_BUG_NULL_SEG);
1016 	wrmsrl(MSR_FS_BASE, old_base);
1017 #endif
1018 }
1019 
1020 static void generic_identify(struct cpuinfo_x86 *c)
1021 {
1022 	c->extended_cpuid_level = 0;
1023 
1024 	if (!have_cpuid_p())
1025 		identify_cpu_without_cpuid(c);
1026 
1027 	/* Cyrix could have CPUID enabled via c_identify() */
1028 	if (!have_cpuid_p())
1029 		return;
1030 
1031 	cpu_detect(c);
1032 
1033 	get_cpu_vendor(c);
1034 
1035 	get_cpu_cap(c);
1036 
1037 	if (c->cpuid_level >= 0x00000001) {
1038 		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1039 #ifdef CONFIG_X86_32
1040 # ifdef CONFIG_SMP
1041 		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1042 # else
1043 		c->apicid = c->initial_apicid;
1044 # endif
1045 #endif
1046 		c->phys_proc_id = c->initial_apicid;
1047 	}
1048 
1049 	get_model_name(c); /* Default name */
1050 
1051 	detect_nopl(c);
1052 
1053 	detect_null_seg_behavior(c);
1054 
1055 	/*
1056 	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
1057 	 * systems that run Linux at CPL > 0 may or may not have the
1058 	 * issue, but, even if they have the issue, there's absolutely
1059 	 * nothing we can do about it because we can't use the real IRET
1060 	 * instruction.
1061 	 *
1062 	 * NB: For the time being, only 32-bit kernels support
1063 	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
1064 	 * whether to apply espfix using paravirt hooks.  If any
1065 	 * non-paravirt system ever shows up that does *not* have the
1066 	 * ESPFIX issue, we can change this.
1067 	 */
1068 #ifdef CONFIG_X86_32
1069 # ifdef CONFIG_PARAVIRT
1070 	do {
1071 		extern void native_iret(void);
1072 		if (pv_cpu_ops.iret == native_iret)
1073 			set_cpu_bug(c, X86_BUG_ESPFIX);
1074 	} while (0);
1075 # else
1076 	set_cpu_bug(c, X86_BUG_ESPFIX);
1077 # endif
1078 #endif
1079 }
1080 
1081 static void x86_init_cache_qos(struct cpuinfo_x86 *c)
1082 {
1083 	/*
1084 	 * The heavy lifting of max_rmid and cache_occ_scale is handled
1085 	 * in get_cpu_cap().  Here we just clamp boot_cpu_data's max_rmid
1086 	 * down in case this CPU supports fewer RMIDs.
1087 	 */
1088 	if (c != &boot_cpu_data) {
1089 		boot_cpu_data.x86_cache_max_rmid =
1090 			min(boot_cpu_data.x86_cache_max_rmid,
1091 			    c->x86_cache_max_rmid);
1092 	}
1093 }
1094 
1095 /*
1096  * Validate that ACPI/mptables have the same information about the
1097  * effective APIC id and update the package map.
1098  */
1099 static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1100 {
1101 #ifdef CONFIG_SMP
1102 	unsigned int apicid, cpu = smp_processor_id();
1103 
1104 	apicid = apic->cpu_present_to_apicid(cpu);
1105 
1106 	if (apicid != c->apicid) {
1107 		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
1108 		       cpu, apicid, c->apicid);
1109 	}
1110 	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
1111 #else
1112 	c->logical_proc_id = 0;
1113 #endif
1114 }
1115 
1116 /*
1117  * This does the hard work of actually picking apart the CPU stuff...
1118  */
1119 static void identify_cpu(struct cpuinfo_x86 *c)
1120 {
1121 	int i;
1122 
1123 	c->loops_per_jiffy = loops_per_jiffy;
1124 	c->x86_cache_size = -1;
1125 	c->x86_vendor = X86_VENDOR_UNKNOWN;
1126 	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
1127 	c->x86_vendor_id[0] = '\0'; /* Unset */
1128 	c->x86_model_id[0] = '\0';  /* Unset */
1129 	c->x86_max_cores = 1;
1130 	c->x86_coreid_bits = 0;
1131 	c->cu_id = 0xff;
1132 #ifdef CONFIG_X86_64
1133 	c->x86_clflush_size = 64;
1134 	c->x86_phys_bits = 36;
1135 	c->x86_virt_bits = 48;
1136 #else
1137 	c->cpuid_level = -1;	/* CPUID not detected */
1138 	c->x86_clflush_size = 32;
1139 	c->x86_phys_bits = 32;
1140 	c->x86_virt_bits = 32;
1141 #endif
1142 	c->x86_cache_alignment = c->x86_clflush_size;
1143 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1144 
1145 	generic_identify(c);
1146 
1147 	if (this_cpu->c_identify)
1148 		this_cpu->c_identify(c);
1149 
1150 	/* Clear/Set all flags overridden by options, after probe */
1151 	apply_forced_caps(c);
1152 
1153 #ifdef CONFIG_X86_64
1154 	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1155 #endif
1156 
1157 	/*
1158 	 * Vendor-specific initialization.  In this section we
1159 	 * canonicalize the feature flags: features a certain CPU
1160 	 * supports which CPUID doesn't report, CPUID claiming
1161 	 * incorrect flags, and other such bugs are all handled
1162 	 * here.
1163 	 *
1164 	 * At the end of this section, c->x86_capability better
1165 	 * indicate the features this CPU genuinely supports!
1166 	 */
1167 	if (this_cpu->c_init)
1168 		this_cpu->c_init(c);
1169 
1170 	/* Disable the PN if appropriate */
1171 	squash_the_stupid_serial_number(c);
1172 
1173 	/* Set up SMEP/SMAP/UMIP */
1174 	setup_smep(c);
1175 	setup_smap(c);
1176 	setup_umip(c);
1177 
1178 	/*
1179 	 * The vendor-specific functions might have changed features.
1180 	 * Now we do "generic changes."
1181 	 */
1182 
1183 	/* Filter out anything that depends on CPUID levels we don't have */
1184 	filter_cpuid_features(c, true);
1185 
1186 	/* If the model name is still unset, do table lookup. */
1187 	if (!c->x86_model_id[0]) {
1188 		const char *p;
1189 		p = table_lookup_model(c);
1190 		if (p)
1191 			strcpy(c->x86_model_id, p);
1192 		else
1193 			/* Last resort... */
1194 			sprintf(c->x86_model_id, "%02x/%02x",
1195 				c->x86, c->x86_model);
1196 	}
1197 
1198 #ifdef CONFIG_X86_64
1199 	detect_ht(c);
1200 #endif
1201 
1202 	x86_init_rdrand(c);
1203 	x86_init_cache_qos(c);
1204 	setup_pku(c);
1205 
1206 	/*
1207 	 * Clear/Set all flags overridden by options; this needs to happen
1208 	 * before the SMP AND of all CPUs' capabilities below.
1209 	 */
1210 	apply_forced_caps(c);
1211 
1212 	/*
1213 	 * On SMP, boot_cpu_data holds the common feature set between
1214 	 * all CPUs; so make sure that we indicate which features are
1215 	 * common between the CPUs.  The first time this routine gets
1216 	 * executed, c == &boot_cpu_data.
1217 	 */
1218 	if (c != &boot_cpu_data) {
1219 		/* AND the already accumulated flags with these */
1220 		for (i = 0; i < NCAPINTS; i++)
1221 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1222 
1223 		/* OR, i.e. replicate the bug flags */
1224 		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1225 			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1226 	}
1227 
1228 	/* Init Machine Check Exception if available. */
1229 	mcheck_cpu_init(c);
1230 
1231 	select_idle_routine(c);
1232 
1233 #ifdef CONFIG_NUMA
1234 	numa_add_cpu(smp_processor_id());
1235 #endif
1236 }
1237 
1238 /*
1239  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1240  * on 32-bit kernels:
1241  */
1242 #ifdef CONFIG_X86_32
1243 void enable_sep_cpu(void)
1244 {
1245 	struct tss_struct *tss;
1246 	int cpu;
1247 
1248 	if (!boot_cpu_has(X86_FEATURE_SEP))
1249 		return;
1250 
1251 	cpu = get_cpu();
1252 	tss = &per_cpu(cpu_tss_rw, cpu);
1253 
1254 	/*
1255 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1256 	 * see the big comment in struct x86_hw_tss's definition.
1257 	 */
1258 
1259 	tss->x86_tss.ss1 = __KERNEL_CS;
1260 	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
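	/* cpu_entry_stack(cpu) + 1 is the top of this CPU's entry stack */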
1261 	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
1262 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1263 
1264 	put_cpu();
1265 }
1266 #endif
1267 
1268 void __init identify_boot_cpu(void)
1269 {
1270 	identify_cpu(&boot_cpu_data);
1271 #ifdef CONFIG_X86_32
1272 	sysenter_setup();
1273 	enable_sep_cpu();
1274 #endif
1275 	cpu_detect_tlb(&boot_cpu_data);
1276 }
1277 
1278 void identify_secondary_cpu(struct cpuinfo_x86 *c)
1279 {
1280 	BUG_ON(c == &boot_cpu_data);
1281 	identify_cpu(c);
1282 #ifdef CONFIG_X86_32
1283 	enable_sep_cpu();
1284 #endif
1285 	mtrr_ap_init();
1286 	validate_apic_and_package_id(c);
1287 }
1288 
1289 static __init int setup_noclflush(char *arg)
1290 {
1291 	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
1292 	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
1293 	return 1;
1294 }
1295 __setup("noclflush", setup_noclflush);
1296 
1297 void print_cpu_info(struct cpuinfo_x86 *c)
1298 {
1299 	const char *vendor = NULL;
1300 
1301 	if (c->x86_vendor < X86_VENDOR_NUM) {
1302 		vendor = this_cpu->c_vendor;
1303 	} else {
1304 		if (c->cpuid_level >= 0)
1305 			vendor = c->x86_vendor_id;
1306 	}
1307 
1308 	if (vendor && !strstr(c->x86_model_id, vendor))
1309 		pr_cont("%s ", vendor);
1310 
1311 	if (c->x86_model_id[0])
1312 		pr_cont("%s", c->x86_model_id);
1313 	else
1314 		pr_cont("%d86", c->x86);
1315 
1316 	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1317 
1318 	if (c->x86_mask || c->cpuid_level >= 0)
1319 		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
1320 	else
1321 		pr_cont(")\n");
1322 }
1323 
1324 /*
1325  * clearcpuid= was already parsed in fpu__init_parse_early_param.
1326  * But we need to keep a dummy __setup around otherwise it would
1327  * show up as an environment variable for init.
1328  */
1329 static __init int setup_clearcpuid(char *arg)
1330 {
1331 	return 1;
1332 }
1333 __setup("clearcpuid=", setup_clearcpuid);
1334 
1335 #ifdef CONFIG_X86_64
1336 DEFINE_PER_CPU_FIRST(union irq_stack_union,
1337 		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
1338 
1339 /*
1340  * The following percpu variables are hot.  Align current_task to
1341  * cacheline size such that they fall in the same cacheline.
1342  */
1343 DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
1344 	&init_task;
1345 EXPORT_PER_CPU_SYMBOL(current_task);
1346 
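/* Points at the top of this CPU's IRQ stack; the stack grows down from here. */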
1347 DEFINE_PER_CPU(char *, irq_stack_ptr) =
1348 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;
1349 
1350 DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1351 
1352 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1353 EXPORT_PER_CPU_SYMBOL(__preempt_count);
1354 
1355 /* May not be marked __init: used by software suspend */
1356 void syscall_init(void)
1357 {
1358 	extern char _entry_trampoline[];
1359 	extern char entry_SYSCALL_64_trampoline[];
1360 
1361 	int cpu = smp_processor_id();
1362 	unsigned long SYSCALL64_entry_trampoline =
1363 		(unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
1364 		(entry_SYSCALL_64_trampoline - _entry_trampoline);
1365 
1366 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
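	/*
	 * With PTI, SYSCALL enters through the per-CPU trampoline in the
	 * cpu_entry_area (which is mapped in the user page tables) rather
	 * than jumping directly to entry_SYSCALL_64.
	 */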
1367 	if (static_cpu_has(X86_FEATURE_PTI))
1368 		wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
1369 	else
1370 		wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
1371 
1372 #ifdef CONFIG_IA32_EMULATION
1373 	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
1374 	/*
1375 	 * This only works on Intel CPUs.
1376 	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
1377 	 * This does not cause SYSENTER to jump to the wrong location, because
1378 	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
1379 	 */
1380 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
1381 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
1382 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
1383 #else
1384 	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
1385 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
1386 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1387 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
1388 #endif
1389 
1390 	/* Flags to clear on syscall */
1391 	wrmsrl(MSR_SYSCALL_MASK,
1392 	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
1393 	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
1394 }
1395 
1396 /*
1397  * Copies of the original ist values from the tss are only accessed during
1398  * debugging; no special alignment is required.
1399  */
1400 DEFINE_PER_CPU(struct orig_ist, orig_ist);
1401 
1402 static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
1403 DEFINE_PER_CPU(int, debug_stack_usage);
1404 
1405 int is_debug_stack(unsigned long addr)
1406 {
1407 	return __this_cpu_read(debug_stack_usage) ||
1408 		(addr <= __this_cpu_read(debug_stack_addr) &&
1409 		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
1410 }
1411 NOKPROBE_SYMBOL(is_debug_stack);
1412 
1413 DEFINE_PER_CPU(u32, debug_idt_ctr);
1414 
1415 void debug_stack_set_zero(void)
1416 {
1417 	this_cpu_inc(debug_idt_ctr);
1418 	load_current_idt();
1419 }
1420 NOKPROBE_SYMBOL(debug_stack_set_zero);
1421 
1422 void debug_stack_reset(void)
1423 {
1424 	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
1425 		return;
1426 	if (this_cpu_dec_return(debug_idt_ctr) == 0)
1427 		load_current_idt();
1428 }
1429 NOKPROBE_SYMBOL(debug_stack_reset);
1430 
1431 #else	/* CONFIG_X86_64 */
1432 
1433 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1434 EXPORT_PER_CPU_SYMBOL(current_task);
1435 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1436 EXPORT_PER_CPU_SYMBOL(__preempt_count);
1437 
1438 /*
1439  * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
1440  * the top of the kernel stack.  Use an extra percpu variable to track the
1441  * top of the kernel stack directly.
1442  */
1443 DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
1444 	(unsigned long)&init_thread_union + THREAD_SIZE;
1445 EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
1446 
1447 #ifdef CONFIG_CC_STACKPROTECTOR
1448 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
1449 #endif
1450 
1451 #endif	/* CONFIG_X86_64 */
1452 
1453 /*
1454  * Clear all 6 debug registers:
1455  */
1456 static void clear_all_debug_regs(void)
1457 {
1458 	int i;
1459 
1460 	for (i = 0; i < 8; i++) {
1461 		/* Skip DR4 and DR5: they are reserved (aliases of DR6/DR7) */
1462 		if ((i == 4) || (i == 5))
1463 			continue;
1464 
1465 		set_debugreg(0, i);
1466 	}
1467 }
1468 
1469 #ifdef CONFIG_KGDB
1470 /*
1471  * Restore debug regs if using kgdbwait and you have a kernel debugger
1472  * connection established.
1473  */
1474 static void dbg_restore_debug_regs(void)
1475 {
1476 	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
1477 		arch_kgdb_ops.correct_hw_break();
1478 }
1479 #else /* ! CONFIG_KGDB */
1480 #define dbg_restore_debug_regs()
1481 #endif /* ! CONFIG_KGDB */
1482 
1483 static void wait_for_master_cpu(int cpu)
1484 {
1485 #ifdef CONFIG_SMP
1486 	/*
1487 	 * wait for ACK from master CPU before continuing
1488 	 * with AP initialization
1489 	 */
1490 	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
1491 	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
1492 		cpu_relax();
1493 #endif
1494 }
1495 
1496 /*
1497  * cpu_init() initializes state that is per-CPU. Some data is already
1498  * initialized (naturally) in the bootstrap process, such as the GDT
1499  * and IDT. We reload them nevertheless; this function acts as a
1500  * 'CPU state barrier', and nothing should get across.
1501  * A lot of state is already set up in PDA init for 64-bit.
1502  */
1503 #ifdef CONFIG_X86_64
1504 
1505 void cpu_init(void)
1506 {
1507 	struct orig_ist *oist;
1508 	struct task_struct *me;
1509 	struct tss_struct *t;
1510 	unsigned long v;
1511 	int cpu = raw_smp_processor_id();
1512 	int i;
1513 
1514 	wait_for_master_cpu(cpu);
1515 
1516 	/*
1517 	 * Initialize the CR4 shadow before doing anything that could
1518 	 * try to read it.
1519 	 */
1520 	cr4_init_shadow();
1521 
1522 	if (cpu)
1523 		load_ucode_ap();
1524 
1525 	t = &per_cpu(cpu_tss_rw, cpu);
1526 	oist = &per_cpu(orig_ist, cpu);
1527 
1528 #ifdef CONFIG_NUMA
1529 	if (this_cpu_read(numa_node) == 0 &&
1530 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
1531 		set_numa_node(early_cpu_to_node(cpu));
1532 #endif
1533 
1534 	me = current;
1535 
1536 	pr_debug("Initializing CPU#%d\n", cpu);
1537 
1538 	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1539 
1540 	/*
1541 	 * Initialize the per-CPU GDT with the boot GDT,
1542 	 * and set up the GDT descriptor:
1543 	 */
1544 
1545 	switch_to_new_gdt(cpu);
1546 	loadsegment(fs, 0);
1547 
1548 	load_current_idt();
1549 
1550 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
1551 	syscall_init();
1552 
1553 	wrmsrl(MSR_FS_BASE, 0);
1554 	wrmsrl(MSR_KERNEL_GS_BASE, 0);
1555 	barrier();
1556 
1557 	x86_configure_nx();
1558 	x2apic_setup();
1559 
1560 	/*
1561 	 * set up and load the per-CPU TSS
1562 	 */
1563 	if (!oist->ist[0]) {
1564 		char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
1565 
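		/*
		 * Walk the exception stacks in the entry area and record the
		 * top of each stack (they grow down) in the TSS IST slots.
		 */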
1566 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1567 			estacks += exception_stack_sizes[v];
1568 			oist->ist[v] = t->x86_tss.ist[v] =
1569 					(unsigned long)estacks;
1570 			if (v == DEBUG_STACK-1)
1571 				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
1572 		}
1573 	}
1574 
1575 	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
1576 
1577 	/*
1578 	 * <= is required because the CPU will access up to
1579 	 * 8 bits beyond the end of the IO permission bitmap.
1580 	 */
1581 	for (i = 0; i <= IO_BITMAP_LONGS; i++)
1582 		t->io_bitmap[i] = ~0UL;
1583 
1584 	mmgrab(&init_mm);
1585 	me->active_mm = &init_mm;
1586 	BUG_ON(me->mm);
1587 	initialize_tlbstate_and_flush();
1588 	enter_lazy_tlb(&init_mm, me);
1589 
1590 	/*
1591 	 * Initialize the TSS.  sp0 points to the entry trampoline stack
1592 	 * regardless of what task is running.
1593 	 */
1594 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
1595 	load_TR_desc();
1596 	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
1597 
1598 	load_mm_ldt(&init_mm);
1599 
1600 	clear_all_debug_regs();
1601 	dbg_restore_debug_regs();
1602 
1603 	fpu__init_cpu();
1604 
1605 	if (is_uv_system())
1606 		uv_cpu_init();
1607 
1608 	load_fixmap_gdt(cpu);
1609 }
1610 
1611 #else
1612 
1613 void cpu_init(void)
1614 {
1615 	int cpu = smp_processor_id();
1616 	struct task_struct *curr = current;
1617 	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
1618 
1619 	wait_for_master_cpu(cpu);
1620 
1621 	/*
1622 	 * Initialize the CR4 shadow before doing anything that could
1623 	 * try to read it.
1624 	 */
1625 	cr4_init_shadow();
1626 
1627 	show_ucode_info_early();
1628 
1629 	pr_info("Initializing CPU#%d\n", cpu);
1630 
1631 	if (cpu_feature_enabled(X86_FEATURE_VME) ||
1632 	    boot_cpu_has(X86_FEATURE_TSC) ||
1633 	    boot_cpu_has(X86_FEATURE_DE))
1634 		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1635 
1636 	load_current_idt();
1637 	switch_to_new_gdt(cpu);
1638 
1639 	/*
1640 	 * Set up and load the per-CPU TSS and LDT
1641 	 */
1642 	mmgrab(&init_mm);
1643 	curr->active_mm = &init_mm;
1644 	BUG_ON(curr->mm);
1645 	initialize_tlbstate_and_flush();
1646 	enter_lazy_tlb(&init_mm, curr);
1647 
1648 	/*
1649 	 * Initialize the TSS.  Don't bother initializing sp0, as the initial
1650 	 * task never enters user mode.
1651 	 */
1652 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
1653 	load_TR_desc();
1654 
1655 	load_mm_ldt(&init_mm);
1656 
1657 	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
1658 
1659 #ifdef CONFIG_DOUBLEFAULT
1660 	/* Set up doublefault TSS pointer in the GDT */
1661 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1662 #endif
1663 
1664 	clear_all_debug_regs();
1665 	dbg_restore_debug_regs();
1666 
1667 	fpu__init_cpu();
1668 
1669 	load_fixmap_gdt(cpu);
1670 }
1671 #endif
1672 
1673 static void bsp_resume(void)
1674 {
1675 	if (this_cpu->c_bsp_resume)
1676 		this_cpu->c_bsp_resume(&boot_cpu_data);
1677 }
1678 
1679 static struct syscore_ops cpu_syscore_ops = {
1680 	.resume		= bsp_resume,
1681 };
1682 
1683 static int __init init_cpu_syscore(void)
1684 {
1685 	register_syscore_ops(&cpu_syscore_ops);
1686 	return 0;
1687 }
1688 core_initcall(init_cpu_syscore);
1689