xref: /openbmc/linux/arch/x86/kernel/smpboot.c (revision 1fa0a7dc)
/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *		Martin J. Bligh	:	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen		:	Converted to new state machine.
 *	Ashok Raj		:	CPU hotplug support
 *	Glauber Costa		:	i386 and x86_64 integration
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/numa.h>
#include <linux/mc146818rtc.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <asm/i8259.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Logical package management. We might want to allocate that dynamically */
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;

/* Maximum number of SMT threads on any online core */
int __read_mostly __max_smt_threads = 1;

/* Flag to indicate if a complete sched domain rebuild is required */
bool x86_topology_update;

int arch_update_cpu_topology(void)
{
	int retval = x86_topology_update;

	x86_topology_update = false;
	return retval;
}

static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0xa, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
							start_eip >> 4;
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
							start_eip & 0xf;
}
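
/*
 * Illustrative note (not in the original file): the warm-reset path above
 * stores the trampoline address as a real-mode segment:offset pair. A
 * minimal, stand-alone sketch of that split, assuming a hypothetical
 * page-aligned start_eip below 1MB:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long start_eip = 0x98000;	// hypothetical
 *		unsigned short seg = start_eip >> 4;	// 0x9800, at 40:69
 *		unsigned short off = start_eip & 0xf;	// 0x0000, at 40:67
 *
 *		// BIOS resumes at seg:off, i.e. (seg << 4) + off
 *		printf("%04x:%04x -> %#lx\n", seg, off,
 *		       ((unsigned long)seg << 4) + off);
 *		return 0;
 *	}
 *
 * Writing 0xa to CMOS shutdown-status register 0xf asks the BIOS to take
 * this warm-reset jump through the 40:67 vector instead of a full POST.
 */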

static inline void smpboot_restore_warm_reset_vector(void)
{
	unsigned long flags;

	/*
	 * Paranoid:  Set warm reset code and vector here back
	 * to default values.
	 */
	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);

	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}

/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void smp_callin(void)
{
	int cpuid;

	/*
	 * If woken up by an INIT in an 82489DX configuration,
	 * cpu_callout_mask guarantees we don't get here before
	 * an INIT_deassert IPI reaches our local APIC, so it is
	 * now safe to touch our local APIC.
	 */
	cpuid = smp_processor_id();

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on cpu_callin_mask until we finish. We are free to set up
	 * this CPU, first the APIC. (this is probably redundant on
	 * most boards)
	 */
	apic_ap_setup();

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * The topology information must be up to date before
	 * calibrate_delay() and notify_cpu_starting().
	 */
	set_cpu_sibling_map(raw_smp_processor_id());

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}

static int cpu0_logical_apicid;
static int enable_start_cpu0;
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* except direct CPU state initialization
	 * before cpu_init(): SMP booting is so fragile that we want to
	 * limit the things done here to the most necessary things.
	 */
	if (boot_cpu_has(X86_FEATURE_PCID))
		__write_cr4(__read_cr4() | X86_CR4_PCIDE);

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();
	__flush_tlb_all();
#endif
	load_current_idt();
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	enable_start_cpu0 = 0;

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the boot CPU:
	 */
	check_tsc_sync_target();

	speculative_store_bypass_ht_init();

	/*
	 * Lock vector_lock, set CPU online and bring the vector
	 * allocator online. Online must be set with vector_lock held
	 * to prevent a concurrent irq setup/teardown from seeing a
	 * half valid vector space.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	lapic_online();
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/**
 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 * @cpu:	CPU to check
 */
bool topology_is_primary_thread(unsigned int cpu)
{
	return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
}

/**
 * topology_smt_supported - Check whether SMT is supported by the CPUs
 */
bool topology_smt_supported(void)
{
	return smp_num_siblings > 1;
}

/**
 * topology_phys_to_logical_pkg - Map a physical package id to a logical one
 * @phys_pkg:	The physical package id to map
 *
 * Returns logical package id or -1 if not found
 */
int topology_phys_to_logical_pkg(unsigned int phys_pkg)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && c->phys_proc_id == phys_pkg)
			return c->logical_proc_id;
	}
	return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);

/**
 * topology_update_package_map - Update the physical to logical package map
 * @pkg:	The physical package id as retrieved via CPUID
 * @cpu:	The cpu for which this is updated
 */
int topology_update_package_map(unsigned int pkg, unsigned int cpu)
{
	int new;

	/* Already available somewhere? */
	new = topology_phys_to_logical_pkg(pkg);
	if (new >= 0)
		goto found;

	new = logical_packages++;
	if (new != pkg) {
		pr_info("CPU %u Converting physical %u to logical package %u\n",
			cpu, pkg, new);
	}
found:
	cpu_data(cpu).logical_proc_id = new;
	return 0;
}
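
/*
 * Worked example (annotation, not in the original file): logical package
 * ids are handed out in discovery order, so sparse physical ids collapse
 * into a dense 0..N-1 range. On a hypothetical two-socket box that
 * enumerates physical package ids 0 and 3:
 *
 *	topology_update_package_map(0, cpu0);	// new logical id 0
 *	topology_update_package_map(3, cpu4);	// new logical id 1, logs
 *						// "Converting physical 3
 *						//  to logical package 1"
 *	topology_phys_to_logical_pkg(3);	// -> 1 from then on
 *
 * Consumers such as per-package driver arrays can therefore be sized by
 * __max_logical_packages and indexed without holes.
 */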

void __init smp_store_boot_cpu_info(void)
{
	int id = 0; /* CPU 0 */
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	topology_update_package_map(c->phys_proc_id, id);
	c->initialized = true;
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	/* Copy boot_cpu_data only on the first bringup */
	if (!c->initialized)
		*c = boot_cpu_data;
	c->cpu_index = id;
	/*
	 * During boot time, CPU0 has this setup already. Save the info when
	 * bringing up AP or offlined CPU0.
	 */
	identify_secondary_cpu(c);
	c->initialized = true;
}

static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(!topology_same_node(c, o),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(mfunc, c1, c2)					\
do {									\
	cpumask_set_cpu((c1), mfunc(c2));				\
	cpumask_set_cpu((c2), mfunc(c1));				\
} while (0)
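
/*
 * Illustration (not in the original file): link_mask() records sibling
 * relations symmetrically, so each CPU's mask contains the other. A
 * stand-alone sketch of the invariant, using hypothetical plain bitmaps:
 *
 *	unsigned long mask[2] = { 0, 0 };	// one bitmap per CPU
 *
 *	void link(int c1, int c2)
 *	{
 *		mask[c2] |= 1UL << c1;
 *		mask[c1] |= 1UL << c2;
 *	}
 *
 *	// link(0, 1) leaves mask[0] = 0b10 and mask[1] = 0b01, so the
 *	// test "is c1 in c2's mask" and its mirror both hold.
 *
 * The symmetry is what lets remove_siblinginfo() below walk a CPU's own
 * masks to find every sibling that must drop it on hot-unplug.
 */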

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
			if (c->cpu_core_id == o->cpu_core_id)
				return topology_sane(c, o, "smt");

			if ((c->cu_id != 0xff) &&
			    (o->cu_id != 0xff) &&
			    (c->cu_id == o->cu_id))
				return topology_sane(c, o, "smt");
		}

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}

/*
 * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
 *
 * These are Intel CPUs that enumerate an LLC that is shared by
 * multiple NUMA nodes. The LLC on these systems is shared for
 * off-package data access but private to the NUMA node (half
 * of the package) for on-package access.
 *
 * CPUID (the source of the information about the LLC) can only
 * enumerate the cache as being shared *or* unshared, but not
 * this particular configuration. The CPU in this case enumerates
 * the cache to be shared across the entire package (spanning both
 * NUMA nodes).
 */

static const struct x86_cpu_id snc_cpu[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X },
	{}
};

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	/* Do not match if we do not have a valid APICID for cpu: */
	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
		return false;

	/* Do not match if LLC id does not match: */
	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
		return false;

	/*
	 * Allow the SNC topology without warning. Return of false
	 * means 'c' does not share the LLC of 'o'. This will be
	 * reflected to userspace.
	 */
	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
		return false;

	return topology_sane(c, o, "llc");
}

/*
 * Unlike the other levels, we do not enforce keeping a
 * multicore group inside a NUMA node.  If this happens, we will
 * discard the MC level of the topology later.
 */
static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id)
		return true;
	return false;
}

#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
{
	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
}

#ifdef CONFIG_SCHED_MC
static int x86_core_flags(void)
{
	return cpu_core_flags() | x86_sched_itmt_flags();
}
#endif
#ifdef CONFIG_SCHED_SMT
static int x86_smt_flags(void)
{
	return cpu_smt_flags() | x86_sched_itmt_flags();
}
#endif
#endif

static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ NULL, },
};

static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * Set if a package/die has multiple NUMA nodes inside.
 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
 * Sub-NUMA Clustering have this.
 */
static bool x86_has_numa_in_package;

void set_cpu_sibling_map(int cpu)
{
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i, threads;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_mp) {
		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(topology_sibling_cpumask, cpu, i);

		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(cpu_llc_shared_mask, cpu, i);

	}

	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * topology_sibling_cpumask links to be set-up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_pkg(c, o))) {
			link_mask(topology_core_cpumask, cpu, i);

			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpumask_weight(
			    topology_sibling_cpumask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(
				    topology_sibling_cpumask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
		if (match_pkg(c, o) && !topology_same_node(c, o))
			x86_has_numa_in_package = true;
	}

	threads = cpumask_weight(topology_sibling_cpumask(cpu));
	if (threads > __max_smt_threads)
		__max_smt_threads = threads;
}
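
/*
 * Worked example (annotation, not in the original file): booted_cores
 * counts whole cores per package as threads arrive. On a hypothetical
 * 2-core/2-thread package booted in order cpu0, cpu1, cpu2, cpu3, with
 * cpu0+cpu2 and cpu1+cpu3 as thread pairs:
 *
 *	cpu0: first thread of core 0  -> booted_cores = 1
 *	cpu1: first thread of core 1  -> booted_cores = 2 (cpu0 updated too)
 *	cpu2: second thread of core 0 -> copies booted_cores = 2
 *	cpu3: second thread of core 1 -> copies booted_cores = 2
 *
 * calculate_max_logical_packages() below relies on cpu_data(0).booted_cores
 * ending up at the full per-package core count.
 */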

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1\n");
}
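
/*
 * Worked arithmetic (annotation, not in the original file): BogoMIPS is
 * loops_per_jiffy scaled to loops per 500000 us, i.e. lpj * HZ / 500000.
 * With hypothetical numbers HZ = 1000 and two CPUs at lpj = 2400000 each:
 *
 *	bogosum                     = 4800000
 *	bogosum / (500000/HZ)       = 4800000 / 500 = 9600	// integer part
 *	(bogosum / (5000/HZ)) % 100 = 960000 % 100 = 0		// two decimals
 *
 * which prints "Total of 2 processors activated (9600.00 BogoMIPS)".
 */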

void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	pr_info("Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		pr_info("... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			pr_cont("a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			pr_cont("%08x\n", status);
			break;
		default:
			pr_cont("failed\n");
		}
	}
}

/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay.
 *
 * Cmdline "cpu_init_udelay=" is available to over-ride this delay.
 * Modern processor families are quirked to remove the delay entirely.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UINT_MAX;

static int __init cpu_init_udelay(char *str)
{
	get_option(&str, &init_udelay);

	return 0;
}
early_param("cpu_init_udelay", cpu_init_udelay);

static void __init smp_quirk_init_udelay(void)
{
	/* if cmdline changed it from default, leave it alone */
	if (init_udelay != UINT_MAX)
		return;

	/* if modern processor, use no delay */
	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
		init_udelay = 0;
		return;
	}
	/* else, use legacy delay */
	init_udelay = UDELAY_10MS_DEFAULT;
}
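
/*
 * Usage note (annotation, not in the original file): the quirk only fills
 * in a default, so the kernel command line always wins. For example, on a
 * hypothetical board whose APs still need the legacy handshake:
 *
 *	cpu_init_udelay=10000	// force the MP-spec 10ms INIT delay
 *	cpu_init_udelay=0	// skip the delay entirely
 *
 * Because cpu_init_udelay() runs as an early_param, init_udelay is already
 * set by the time smp_quirk_init_udelay() compares it against UINT_MAX.
 */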

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	udelay(init_udelay);

	pr_debug("Deasserting INIT\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();

	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(300);

		pr_debug("Startup point 1\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(200);

		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}
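
/*
 * Protocol summary (annotation, not in the original file): the function
 * above implements the classic INIT-INIT-STARTUP handshake from the MP
 * spec. Condensed, with the ICR values it programs:
 *
 *	1. INIT assert:    LEVELTRIG | ASSERT | DM_INIT  -> target APIC
 *	2. delay           (init_udelay, 10ms on legacy parts)
 *	3. INIT de-assert: LEVELTRIG | DM_INIT
 *	4. STARTUP x2:     DM_STARTUP | (start_eip >> 12)
 *
 * The STARTUP vector field is the trampoline's 4K page number, which is
 * why start_eip must sit below 1MB and be page-aligned: the AP starts in
 * real mode with CS = (start_eip >> 12) << 8 and IP = 0.
 */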

/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
	static int current_node = NUMA_NO_NODE;
	int node = early_cpu_to_node(cpu);
	static int width, node_width;

	if (!width)
		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */

	if (!node_width)
		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */

	if (cpu == 1)
		printk(KERN_INFO "x86: Booting SMP configuration:\n");

	if (system_state < SYSTEM_RUNNING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont("\n");
			current_node = node;

			printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
			       node_width - num_digits(node), " ", node);
		}

		/* Add padding for the BSP */
		if (cpu == 1)
			pr_cont("%*s", width + 1, " ");

		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);

	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}

static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
		return NMI_HANDLED;

	return NMI_DONE;
}

/*
 * Wake up AP by INIT, INIT, STARTUP sequence.
 *
 * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS
 * boot-strap code which is not a desired behavior for waking up BSP. To
 * avoid the boot-strap code, wake up CPU0 by NMI instead.
 *
 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
 * We'll change this code in the future to wake up hard offlined CPU0 if
 * real platform and request are available.
 */
static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
	       int *cpu0_nmi_registered)
{
	int id;
	int boot_error;

	preempt_disable();

	/*
	 * Wake up AP by INIT, INIT, STARTUP sequence.
	 */
	if (cpu) {
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
		goto out;
	}

	/*
	 * Wake up BSP by nmi.
	 *
	 * Register a NMI handler to help wake up CPU0.
	 */
	boot_error = register_nmi_handler(NMI_LOCAL,
					  wakeup_cpu0_nmi, 0, "wake_cpu0");

	if (!boot_error) {
		enable_start_cpu0 = 1;
		*cpu0_nmi_registered = 1;
		if (apic->dest_logical == APIC_DEST_LOGICAL)
			id = cpu0_logical_apicid;
		else
			id = apicid;
		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
	}

out:
	preempt_enable();

	return boot_error;
}

int common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();

	per_cpu(current_task, cpu) = idle;

	/* Initialize the interrupt stack(s) */
	ret = irq_init_percpu_irqstack(cpu);
	if (ret)
		return ret;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
#else
	initial_gs = per_cpu_offset(cpu);
#endif
	return 0;
}

/*
 * NOTE - on most systems this is a PHYSICAL APIC ID, but on multiquad
 * (i.e. clustered APIC addressing mode) it is a LOGICAL APIC ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
		       int *cpu0_nmi_registered)
{
	volatile u32 *trampoline_status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	unsigned long timeout;

	idle->thread.sp = (unsigned long)task_pt_regs(idle);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
	initial_code = (unsigned long)start_secondary;
	initial_stack  = idle->thread.sp;

	/* Enable the espfix hack for this CPU */
	init_espfix_ap(cpu);

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	if (x86_platform.legacy.warm_reset) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(boot_cpu_apic_version)) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * AP might wait on cpu_callout_mask in cpu_init() with
	 * cpu_initialized_mask set if previous attempt to online
	 * it timed-out. Clear cpu_initialized_mask so that after
	 * INIT/SIPI it could start with a clean state.
	 */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	smp_mb();

	/*
	 * Wake up a CPU in different cases:
	 * - Use the method in the APIC driver if it's defined
	 * Otherwise,
	 * - Use an INIT boot APIC message for APs or NMI for BSP.
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     cpu0_nmi_registered);

	if (!boot_error) {
		/*
		 * Wait 10s total for first sign of life from AP
		 */
		boot_error = -1;
		timeout = jiffies + 10*HZ;
		while (time_before(jiffies, timeout)) {
			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
				/*
				 * Tell AP to proceed with initialization
				 */
				cpumask_set_cpu(cpu, cpu_callout_mask);
				boot_error = 0;
				break;
			}
			schedule();
		}
	}

	if (!boot_error) {
		/*
		 * Wait till AP completes initial initialization
		 */
		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work (triggered by the AP coming online)
			 * to be completed in the stop machine context.
			 */
			schedule();
		}
	}

	/* mark "stuck" area as not stuck */
	*trampoline_status = 0;

	if (x86_platform.legacy.warm_reset) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}

	return boot_error;
}
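
/*
 * Handshake overview (annotation, not in the original file): the BSP and
 * the AP synchronize through a chain of cpumasks, roughly:
 *
 *	BSP (do_boot_cpu)                AP (trampoline/start_secondary)
 *	-----------------                -------------------------------
 *	send INIT/SIPI (or NMI)    -->   cpu_init() runs
 *	wait cpu_initialized_mask  <--   AP marks itself initialized
 *	set  cpu_callout_mask      -->   AP proceeds past cpu_init()
 *	wait cpu_callin_mask       <--   smp_callin() marks call-in
 *	check_tsc_sync_source()    <->   check_tsc_sync_target()
 *	wait cpu_online (caller)   <--   set_cpu_online(cpu, true)
 *
 * Each step blocks on the previous one, so a wedged AP is caught by the
 * 10s cpu_initialized_mask timeout above.
 */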

int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	int cpu0_nmi_registered = 0;
	unsigned long flags;
	int err, ret = 0;

	lockdep_assert_irqs_enabled();

	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);

	if (apicid == BAD_APICID ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	/* x86 CPUs take themselves offline, so delayed offline is OK. */
	err = cpu_check_up_prepare(cpu);
	if (err && err != -EBUSY)
		return err;

	/* the FPU context is blank, nobody can own it */
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;

	err = common_cpu_up(cpu, tidle);
	if (err)
		return err;

	err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
	if (err) {
		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
		ret = -EIO;
		goto unreg_nmi;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

unreg_nmi:
	/*
	 * Clean up the nmi handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return ret;
}

/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	pr_info("SMP disabled\n");

	disable_ioapic_support();

	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, topology_sibling_cpumask(0));
	cpumask_set_cpu(0, topology_core_cpumask(0));
}

/*
 * Various sanity checks.
 */
static void __init smp_sanity_check(void)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
			  boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

static void __init smp_get_logical_apicid(void)
{
	if (x2apic_mode)
		cpu0_logical_apicid = apic_read(APIC_LDR);
	else
		cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
}

/*
 * Prepare for SMP bootup.
 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
 *            for common interface support.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	smp_cpu_index_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_boot_cpu_info(); /* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}

	/*
	 * Set 'default' x86 topology, this matches default_topology() in that
	 * it has NUMA nodes as a topology level. See also
	 * native_smp_cpus_done().
	 *
	 * Must be done before set_cpu_sibling_map() is run.
	 */
	set_sched_topology(x86_topology);

	set_cpu_sibling_map(0);

	smp_sanity_check();

	switch (apic_intr_mode) {
	case APIC_PIC:
	case APIC_VIRTUAL_WIRE_NO_CONFIG:
		disable_smp();
		return;
	case APIC_SYMMETRIC_IO_NO_ROUTING:
		disable_smp();
		/* Setup local timer */
		x86_init.timers.setup_percpu_clockev();
		return;
	case APIC_VIRTUAL_WIRE:
	case APIC_SYMMETRIC_IO:
		break;
	}

	/* Setup local timer */
	x86_init.timers.setup_percpu_clockev();

	smp_get_logical_apicid();

	pr_info("CPU0: ");
	print_cpu_info(&cpu_data(0));

	native_pv_lock_init();

	uv_system_init();

	set_mtrr_aps_delayed_init();

	smp_quirk_init_udelay();

	speculative_store_bypass_ht_init();
}

void arch_enable_nonboot_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
	mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	cpu_set_state_online(me);
}

void __init calculate_max_logical_packages(void)
{
	int ncpus;

	/*
	 * Today neither Intel nor AMD support heterogeneous systems so
	 * extrapolate the boot cpu's data to all packages.
	 */
	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
	pr_info("Max logical packages: %u\n", __max_logical_packages);
}
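
/*
 * Worked arithmetic (annotation, not in the original file): with
 * hypothetical values booted_cores = 8, 2 SMT threads per core and
 * total_cpus = 32:
 *
 *	ncpus                  = 8 * 2 = 16	// CPUs per package
 *	__max_logical_packages = DIV_ROUND_UP(32, 16) = 2
 *
 * The rounding up keeps the estimate safe when total_cpus is not an
 * exact multiple of the per-package CPU count.
 */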

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done\n");

	calculate_max_logical_packages();

	if (x86_has_numa_in_package)
		set_sched_topology(x86_numa_in_package_topology);

	nmi_selftest();
	impress_friends();
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

/*
 * cpu_possible_mask should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * If cpu_hotplug is not compiled in, we resort to the current behaviour,
 * which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* No boot processor was found in mptable or ACPI MADT */
	if (!num_processors) {
		if (boot_cpu_has(X86_FEATURE_APIC)) {
			int apicid = boot_cpu_physical_apicid;
			int cpu = hard_smp_processor_id();

			pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);

			/* Make sure boot cpu is enumerated */
			if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
			    apic->apic_id_valid(apicid))
				generic_processor_info(apicid, boot_cpu_apic_version);
		}

		if (!num_processors)
			num_processors = 1;
	}

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	nr_cpu_ids = possible;

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	reset_cpu_possible_mask();

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
}
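
/*
 * Worked example (annotation, not in the original file): with a
 * hypothetical firmware report of 4 enabled and 2 disabled (hotpluggable)
 * CPUs, CONFIG_HOTPLUG_CPU=y and no possible_cpus=/nr_cpus= override:
 *
 *	possible   = num_processors + disabled_cpus = 4 + 2 = 6
 *	total_cpus = max(6, 4 + 2) = 6
 *
 * which logs "Allowing 6 CPUs, 2 hotplug CPUs". Over-reserving is avoided
 * deliberately, since every possible CPU pins per-cpu memory for the
 * lifetime of the system.
 */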

#ifdef CONFIG_HOTPLUG_CPU

/* Recompute SMT state for all CPUs on offline */
static void recompute_smt_state(void)
{
	int max_threads, cpu;

	max_threads = 0;
	for_each_online_cpu (cpu) {
		int threads = cpumask_weight(topology_sibling_cpumask(cpu));

		if (threads > max_threads)
			max_threads = threads;
	}
	__max_smt_threads = max_threads;
}

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
	cpumask_clear(cpu_llc_shared_mask(cpu));
	cpumask_clear(topology_sibling_cpumask(cpu));
	cpumask_clear(topology_core_cpumask(cpu));
	c->cpu_core_id = 0;
	c->booted_cores = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
	recompute_smt_state();
}

static void remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
	lapic_offline();
}

int native_cpu_disable(void)
{
	int ret;

	ret = lapic_can_unplug_cpu();
	if (ret)
		return ret;

	clear_local_APIC();
	cpu_disable_common();

	return 0;
}

int common_cpu_die(unsigned int cpu)
{
	int ret = 0;

	/* We don't do anything here: idle task is faking death itself. */

	/* They ack this in play_dead() by setting CPU_DEAD */
	if (cpu_wait_death(cpu, 5)) {
		if (system_state == SYSTEM_RUNNING)
			pr_info("CPU %u is now offline\n", cpu);
	} else {
		pr_err("CPU %u didn't die...\n", cpu);
		ret = -1;
	}

	return ret;
}

void native_cpu_die(unsigned int cpu)
{
	common_cpu_die(cpu);
}

void play_dead_common(void)
{
	idle_task_exit();

	/* Ack it */
	(void)cpu_report_death();

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

static bool wakeup_cpu0(void)
{
	if (smp_processor_id() == 0 && enable_start_cpu0)
		return true;

	return false;
}

/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	void *mwait_ptr;
	int i;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		return;
	if (!this_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid; otherwise it is
	 * initialized below to the (cstate, sub_cstate) hint value.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors.  The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series.  It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}
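
/*
 * Worked example (annotation, not in the original file): the MWAIT hint
 * in EAX packs the target C-state in bits 7:4 and the sub-state minus one
 * in bits 3:0. If CPUID leaf 5 reported, hypothetically, 2 sub-states for
 * C1 and 4 for C3 in the EDX nibbles, the loop above ends with:
 *
 *	highest_cstate    = 2	// loop index of C3 after the first >>= 4
 *	highest_subcstate = 4
 *	eax = (2 << 4) | (4 - 1) = 0x23	// deepest advertised state
 *
 * i.e. the dying CPU parks itself in the deepest MWAIT state the CPU
 * enumerates, minimizing its power draw until hot-add or reboot.
 */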

void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();	/* Only returns on failure */
	if (cpuidle_play_dead())
		hlt_play_dead();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif
1707