xref: /openbmc/linux/arch/x86/kernel/smpboot.c (revision b6dcefde)
/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *		Martin J. Bligh	: 	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *		Andi Kleen	:	Converted to new state machine.
 *	Ashok Raj		: 	CPU hotplug support
 *	Glauber Costa		:	i386 and x86_64 integration
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/trampoline.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/vmi.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>

#include <asm/smpboot_hooks.h>

#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
static int low_mappings;
#endif

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
/*
 * Store all idle threads; an existing idle thread can be reused instead
 * of creating a new one.  This also avoids complicated thread-destroy
 * handling for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
#endif
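
/*
 * Usage sketch (illustrative only, not part of the build): do_boot_cpu()
 * consults this cache before forking a fresh idle task, roughly:
 *
 *	struct task_struct *idle = get_idle_for_cpu(cpu);
 *	if (!idle) {
 *		idle = fork_idle(cpu);		(first bringup of this CPU)
 *		set_idle_for_cpu(cpu, idle);	(reused on later re-plug)
 *	}
 *
 * so a CPU that is offlined and onlined again keeps its idle thread.
 */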

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

atomic_t init_deasserted;

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
	cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
	int node;

	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})
#endif
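
/*
 * Note: on builds without (CONFIG_NUMA && CONFIG_X86_32) the two macros
 * above expand to an empty GCC statement expression, ({}), so call
 * sites such as "map_cpu_to_node(cpu, node);" still compile as valid
 * statements while generating no code.
 */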

#ifdef CONFIG_X86_32
static int boot_cpu_logical_apicid;

u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
					{ [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apic->apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

void numa_remove_cpu(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
#else
#define map_cpu_to_logical_apicid()  do {} while (0)
#endif

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
static void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	if (apic->wait_for_init_deassert)
		apic->wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();
	cpuid = smp_processor_id();
	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Wait for complete
	 * APIC bus silence for 1 second; this overestimates, by
	 * about a factor of two, the time the boot CPU spends
	 * sending the up to 2 STARTUP IPIs. That should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	pr_debug("CALLIN, before setup_local_APIC().\n");
	if (apic->smp_callin_clear_local_apic)
		apic->smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();
	map_cpu_to_logical_apicid();

	notify_cpu_starting(cpuid);
	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}
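
/*
 * Overview (informal) of the call-in handshake between the boot
 * processor (BSP) and an application processor (AP), as implemented
 * by smp_callin() above and do_boot_cpu() below:
 *
 *	BSP					AP
 *	set cpu_callout_mask bit    ---->	spin until callout seen
 *	spin on cpu_callin_mask     <----	set cpu_callin_mask bit
 *
 * The AP panics if the callout never arrives; the BSP reports a boot
 * error if the call-in never arrives.
 */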

/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * bare minimum.
	 */
	vmi_bringup();
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0();
		enable_8259A_irq(0);
	}

#ifdef CONFIG_X86_32
	while (low_mappings)
		cpu_relax();
	__flush_tlb_all();
#endif

	/* This must be done before setting cpu_online_mask */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number
	 * of IPI recipients and the time the IPI is actually sent.
	 * Holding the lock keeps this cpu out of any smp_call_function()
	 * that is currently in progress.
	 *
	 * We need to hold vector_lock so that the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	ipi_call_lock();
	lock_vector_lock();
	__setup_vector_irq(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	ipi_call_unlock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	/* enable local interrupts */
	local_irq_enable();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_idle();
}

#ifdef CONFIG_CPUMASK_OFFSTACK
/* In this case, llc_shared_map is a pointer to a cpumask. */
static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
				    const struct cpuinfo_x86 *src)
{
	struct cpumask *llc = dst->llc_shared_map;
	*dst = *src;
	dst->llc_shared_map = llc;
}
#else
static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
				    const struct cpuinfo_x86 *src)
{
	*dst = *src;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */

void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	copy_cpuinfo_x86(c, &boot_cpu_data);
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
}


void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, cpu_sibling_setup_mask) {
			struct cpuinfo_x86 *o = &cpu_data(i);

			if (c->phys_proc_id == o->phys_proc_id &&
			    c->cpu_core_id == o->cpu_core_id) {
				cpumask_set_cpu(i, cpu_sibling_mask(cpu));
				cpumask_set_cpu(cpu, cpu_sibling_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, c->llc_shared_map);
				cpumask_set_cpu(cpu, o->llc_shared_map);
			}
		}
	} else {
		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
	}

	cpumask_set_cpu(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpumask_set_cpu(i, c->llc_shared_map);
			cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpumask_set_cpu(i, cpu_core_mask(cpu));
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(cpu_sibling_mask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
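
/*
 * Worked example (illustrative only): on a hypothetical box with two
 * packages, each containing two hyperthreaded cores, after cpus 0-7
 * are up, cpu 0 (package 0, core 0, thread 0) ends up with:
 *
 *	cpu_sibling_mask(0) == { 0, 1 }		HT siblings, same core
 *	cpu_core_mask(0)    == { 0, 1, 2, 3 }	all threads in package 0
 *	llc_shared_map	    == the cpus sharing cpu 0's last-level cache
 *
 * and booted_cores for cpu 0 is 2, one per online core in its package.
 */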

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if ((sched_mc_power_savings || sched_smt_power_savings) &&
	    !(cpu_has(c, X86_FEATURE_AMD_DCM)))
		return cpu_core_mask(cpu);
	else
		return c->llc_shared_map;
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1.\n");
}

void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int __cpuinit
wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}

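/*
 * Overview of the MP-spec wakeup sequence implemented below (timings
 * as coded, not normative): assert INIT (level triggered), wait 10ms,
 * deassert INIT, then send up to two STARTUP IPIs with short delays in
 * between.  The STARTUP vector field carries the 4K page number of the
 * real-mode entry point, i.e. in effect:
 *
 *	apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), apicid);
 *
 * which is why start_eip must be page aligned and below 1MB.
 */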
static int __cpuinit
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	pr_debug("Deasserting INIT.\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
			 (unsigned long)stack_start.sp);

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d.\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d.\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		pr_debug("Startup point 1.\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

/* reduce the number of lines printed when booting a large cpu count system */
static void __cpuinit announce_cpu(int cpu, int apicid)
{
	static int current_node = -1;
	int node = cpu_to_node(cpu);

	if (system_state == SYSTEM_BOOTING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont(" Ok.\n");
			current_node = node;
			pr_info("Booting Node %3d, Processors ", node);
		}
		pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
		return;
	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}
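
/*
 * Example boot output (illustrative only) in the SYSTEM_BOOTING case:
 *
 *	Booting Node   0, Processors  #1 #2 #3 Ok.
 *	Booting Node   1, Processors  #4 #5 #6 #7 Ok.
 *
 * After boot (CPU hotplug), one full line is printed per CPU instead.
 */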

/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int __cpuinit do_boot_cpu(int apicid, int cpu)
{
	unsigned long boot_error = 0;
	unsigned long start_ip;
	int timeout;
	struct create_idle c_idle = {
		.cpu	= cpu,
		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};

	INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	/*
	 * We can't use kernel_thread() here since we must avoid
	 * rescheduling the child.
	 */
	if (c_idle.idle) {
		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk(KERN_ERR "failed fork for CPU %d\n", cpu);
		destroy_work_on_stack(&c_idle.work);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
	per_cpu(current_task, cpu) = c_idle.idle;
#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
	initial_gs = per_cpu_offset(cpu);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(c_idle.idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	initial_code = (unsigned long)start_secondary;
	stack_start.sp = (void *) c_idle.idle->thread.sp;

	/* start_ip had better be page-aligned! */
	start_ip = setup_trampoline();

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * Kick the secondary CPU. Use the method in the APIC driver
	 * if it's defined - or use an INIT boot APIC message otherwise:
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		pr_debug("Before Callout %d.\n", cpu);
		cpumask_set_cpu(cpu, cpu_callout_mask);
		pr_debug("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpumask_test_cpu(cpu, cpu_callin_mask))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpumask_test_cpu(cpu, cpu_callin_mask))
			pr_debug("CPU%d: has booted.\n", cpu);
		else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				pr_err("CPU%d: Stuck ??\n", cpu);
			else
				/* trampoline code not run */
				pr_err("CPU%d: Not responding.\n", cpu);
			if (apic->inquire_remote_apic)
				apic->inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		numa_remove_cpu(cpu); /* was set by numa_add_cpu */

		/* was set by do_boot_cpu() */
		cpumask_clear_cpu(cpu, cpu_callout_mask);

		/* was set by cpu_init() */
		cpumask_clear_cpu(cpu, cpu_initialized_mask);

		set_cpu_present(cpu, false);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}

	destroy_work_on_stack(&c_idle.work);
	return boot_error;
}

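/*
 * This is the x86 back end of the generic CPU hotplug path: in this
 * kernel it is installed as smp_ops.cpu_up, so an online request such
 * as (assuming CONFIG_HOTPLUG_CPU and sysfs)
 *
 *	echo 1 > /sys/devices/system/cpu/cpu1/online
 *
 * reaches this function via cpu_up() -> __cpu_up().
 */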
int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
		min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
	flush_tlb_all();
	low_mappings = 1;

	err = do_boot_cpu(apicid, cpu);

	zap_low_mappings(false);
	low_mappings = 0;
#else
	err = do_boot_cpu(apicid, cpu);
#endif
	if (err) {
		pr_debug("do_boot_cpu failed %d\n", err);
		return -EIO;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}

/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));
	smpboot_clear_io_apic_irqs();

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	map_cpu_to_logical_apicid();
	cpumask_set_cpu(0, cpu_sibling_mask(0));
	cpumask_set_cpu(0, cpu_core_mask(0));
}

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		printk(KERN_WARNING
		       "More than 8 CPUs detected - skipping them.\n"
		       "Use CONFIG_X86_BIGSMP.\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk(KERN_WARNING
			"weird, boot CPU (#%d) not listed by the BIOS.\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		preempt_enable();
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk(KERN_NOTICE
			"weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !cpu_has_apic) {
		if (!disable_apic) {
			pr_err("BIOS bug, local APIC #%d not detected!...\n",
				boot_cpu_physical_apicid);
			pr_err("... forcing use of dummy APIC emulation."
				"(tell your hw vendor)\n");
		}
		smpboot_clear_io_apic();
		arch_disable_smp_support();
		return -1;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		smpboot_clear_io_apic();

		localise_nmi_watchdog();

		connect_bsp_APIC();
		setup_local_APIC();
		end_local_APIC_setup();
		return -1;
	}

	return 0;
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	preempt_disable();
	smp_cpu_index_default();
	current_cpu_data = boot_cpu_data;
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();
	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
#ifdef CONFIG_X86_32
	boot_cpu_logical_apicid = logical_smp_processor_id();
#endif
	current_thread_info()->cpu = 0;  /* needed? */
	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	enable_IR_x2apic();
	default_setup_apic_routing();

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		goto out;
	}

	preempt_disable();
	if (read_apic_id() != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		     read_apic_id(), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}
	preempt_enable();

	connect_bsp_APIC();

	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

	/*
	 * Enable IO APIC before setting up error vector
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();

	end_local_APIC_setup();

	map_cpu_to_logical_apicid();

	if (apic->setup_portio_remap)
		apic->setup_portio_remap();

	smpboot_setup_io_apic();
	/*
	 * Set up local APIC timer on boot CPU.
	 */

	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));
	x86_init.timers.setup_percpu_clockev();

	if (is_uv_system())
		uv_system_init();

	set_mtrr_aps_delayed_init();
out:
	preempt_enable();
}

void arch_enable_nonboot_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
	mtrr_aps_init();
}
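
/*
 * The two hooks above bracket the generic enable_nonboot_cpus() loop
 * (run e.g. on resume from suspend): AP MTRR programming is deferred
 * while the CPUs boot and then applied once at the end, rather than
 * once per CPU.
 */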

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done.\n");

	impress_friends();
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	check_nmi_watchdog();
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
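
/*
 * early_param() registers a handler for a kernel command-line option,
 * so booting with e.g.
 *
 *	possible_cpus=8
 *
 * stores 8 in setup_possible_cpus, which prefill_possible_map() below
 * uses when sizing cpu_possible_mask.
 */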

/*
 * cpu_possible_mask should be static; it cannot change as CPUs
 * are onlined or offlined. The reason is that some modules allocate
 * per-cpu data structures at init time and don't expect to do this
 * dynamically on CPU arrival/departure.
 * cpu_present_mask, on the other hand, can change dynamically.
 * If CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* no processor from mptable or madt */
	if (!num_processors)
		num_processors = 1;

	if (setup_possible_cpus == -1)
		possible = num_processors + disabled_cpus;
	else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	if (possible > CONFIG_NR_CPUS) {
		printk(KERN_WARNING
			"%d Processors exceeds NR_CPUS limit of %d\n",
			possible, CONFIG_NR_CPUS);
		possible = CONFIG_NR_CPUS;
	}

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);

	nr_cpu_ids = possible;
}
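
/*
 * Worked example (illustrative only): with 4 processors listed in the
 * MADT, 2 BIOS-disabled CPUs and no possible_cpus= override:
 *
 *	possible   = 4 + 2 = 6	(capped at CONFIG_NR_CPUS if larger)
 *	total_cpus = 6
 *
 * and the boot log reads "SMP: Allowing 6 CPUs, 2 hotplug CPUs".
 */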

#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, cpu_core_mask(cpu)) {
		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, cpu_sibling_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
	cpumask_clear(cpu_sibling_mask(cpu));
	cpumask_clear(cpu_core_mask(cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
}

static void __ref remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
}

int native_cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	cpu_disable_common();
	return 0;
}

void native_cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	pr_err("CPU %u didn't die...\n", cpu);
}

void play_dead_common(void)
{
	idle_task_exit();
	reset_lazy_tlbstate();
	irq_ctx_exit(raw_smp_processor_id());
	c1e_remove_cpu(raw_smp_processor_id());

	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	wbinvd_halt();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif