xref: /openbmc/linux/arch/powerpc/kernel/smp.c (revision 512691d4907d7cf4b8d05c6f8572d1fa60ccec20)
15ad57078SPaul Mackerras /*
25ad57078SPaul Mackerras  * SMP support for ppc.
35ad57078SPaul Mackerras  *
45ad57078SPaul Mackerras  * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
55ad57078SPaul Mackerras  * deal of code from the sparc and intel versions.
65ad57078SPaul Mackerras  *
75ad57078SPaul Mackerras  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
85ad57078SPaul Mackerras  *
95ad57078SPaul Mackerras  * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
105ad57078SPaul Mackerras  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
115ad57078SPaul Mackerras  *
125ad57078SPaul Mackerras  *      This program is free software; you can redistribute it and/or
135ad57078SPaul Mackerras  *      modify it under the terms of the GNU General Public License
145ad57078SPaul Mackerras  *      as published by the Free Software Foundation; either version
155ad57078SPaul Mackerras  *      2 of the License, or (at your option) any later version.
165ad57078SPaul Mackerras  */
175ad57078SPaul Mackerras 
185ad57078SPaul Mackerras #undef DEBUG
195ad57078SPaul Mackerras 
205ad57078SPaul Mackerras #include <linux/kernel.h>
214b16f8e2SPaul Gortmaker #include <linux/export.h>
225ad57078SPaul Mackerras #include <linux/sched.h>
235ad57078SPaul Mackerras #include <linux/smp.h>
245ad57078SPaul Mackerras #include <linux/interrupt.h>
255ad57078SPaul Mackerras #include <linux/delay.h>
265ad57078SPaul Mackerras #include <linux/init.h>
275ad57078SPaul Mackerras #include <linux/spinlock.h>
285ad57078SPaul Mackerras #include <linux/cache.h>
295ad57078SPaul Mackerras #include <linux/err.h>
308a25a2fdSKay Sievers #include <linux/device.h>
315ad57078SPaul Mackerras #include <linux/cpu.h>
325ad57078SPaul Mackerras #include <linux/notifier.h>
334b703a23SAnton Blanchard #include <linux/topology.h>
345ad57078SPaul Mackerras 
355ad57078SPaul Mackerras #include <asm/ptrace.h>
3660063497SArun Sharma #include <linux/atomic.h>
375ad57078SPaul Mackerras #include <asm/irq.h>
385ad57078SPaul Mackerras #include <asm/page.h>
395ad57078SPaul Mackerras #include <asm/pgtable.h>
405ad57078SPaul Mackerras #include <asm/prom.h>
415ad57078SPaul Mackerras #include <asm/smp.h>
425ad57078SPaul Mackerras #include <asm/time.h>
435ad57078SPaul Mackerras #include <asm/machdep.h>
44e2075f79SNathan Lynch #include <asm/cputhreads.h>
455ad57078SPaul Mackerras #include <asm/cputable.h>
465ad57078SPaul Mackerras #include <asm/mpic.h>
47a7f290daSBenjamin Herrenschmidt #include <asm/vdso_datapage.h>
485ad57078SPaul Mackerras #ifdef CONFIG_PPC64
495ad57078SPaul Mackerras #include <asm/paca.h>
505ad57078SPaul Mackerras #endif
5118ad51ddSAnton Blanchard #include <asm/vdso.h>
52ae3a197eSDavid Howells #include <asm/debug.h>
535ad57078SPaul Mackerras 
545ad57078SPaul Mackerras #ifdef DEBUG
55f9e4ec57SMichael Ellerman #include <asm/udbg.h>
565ad57078SPaul Mackerras #define DBG(fmt...) udbg_printf(fmt)
575ad57078SPaul Mackerras #else
585ad57078SPaul Mackerras #define DBG(fmt...)
595ad57078SPaul Mackerras #endif
605ad57078SPaul Mackerras 
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

/* thread_info handed to the secondary being started (set in
 * cpu_idle_thread_init(); presumably consumed by the asm entry
 * path — confirm against head_64.S) */
struct thread_info *secondary_ti;

/* Per-cpu topology masks: threads sharing a core, and cpus sharing
 * a core/L2 (filled in by start_secondary()) */
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

/* Whether SMT was enabled on the kernel command line (default: yes) */
int smt_enabled_at_boot = 1;

/* Crash/kexec callback: installed by crash_send_ipi(), invoked from
 * debug_ipi_action() on every CPU that receives the break IPI */
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
83cc532915SMichael Ellerman 
#ifdef CONFIG_PPC64
/*
 * Release CPU @nr from its spin loop so it can enter the kernel.
 * Returns 0 on success; the CPU is either released via its paca
 * cpu_start flag or, if already started once, prodded with a
 * reschedule IPI to recover from a soft unplug.
 */
int __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero After we set cpu_start,
	 * the processor will continue on to secondary_start
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		/* make the flag update visible to the spinning CPU */
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back
	 */
	per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
1135ad57078SPaul Mackerras 
/* IRQ handler for a broadcast cross-CPU function-call IPI */
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

/* IRQ handler for a reschedule IPI */
static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

/* IRQ handler for a single-target function-call IPI */
static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

/*
 * IRQ handler for the debugger-break IPI: run the crash callback
 * when one is installed (see crash_send_ipi()), otherwise drop
 * into the debugger if configured.
 */
static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

/* Handler for each IPI message type, indexed by PPC_MSG_* */
static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

/* Names passed to request_irq() for each message type, same indexing */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};
15925ddd738SMilton Miller 
16025ddd738SMilton Miller /* optional function to request ipi, for controllers with >= 4 ipis */
16125ddd738SMilton Miller int smp_request_message_ipi(int virq, int msg)
16225ddd738SMilton Miller {
16325ddd738SMilton Miller 	int err;
16425ddd738SMilton Miller 
16525ddd738SMilton Miller 	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
16625ddd738SMilton Miller 		return -EINVAL;
16725ddd738SMilton Miller 	}
16825ddd738SMilton Miller #if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
16925ddd738SMilton Miller 	if (msg == PPC_MSG_DEBUGGER_BREAK) {
17025ddd738SMilton Miller 		return 1;
17125ddd738SMilton Miller 	}
17225ddd738SMilton Miller #endif
1733b5e16d7SThomas Gleixner 	err = request_irq(virq, smp_ipi_action[msg],
1743b5e16d7SThomas Gleixner 			  IRQF_PERCPU | IRQF_NO_THREAD,
17525ddd738SMilton Miller 			  smp_ipi_name[msg], 0);
17625ddd738SMilton Miller 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
17725ddd738SMilton Miller 		virq, smp_ipi_name[msg], err);
17825ddd738SMilton Miller 
17925ddd738SMilton Miller 	return err;
18025ddd738SMilton Miller }
18125ddd738SMilton Miller 
#ifdef CONFIG_PPC_SMP_MUXED_IPI
/*
 * Controllers with a single hardware IPI per CPU multiplex the four
 * PPC_MSG_* types through a per-cpu flag word: the sender sets one
 * byte of 'messages' and fires the lone IPI; the receiver demuxes
 * in smp_ipi_demux().
 */
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

/* Stash controller-private data later handed to smp_ops->cause_ipi() */
void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

/* Flag message @msg for @cpu and trigger its (single) hardware IPI */
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	/* each message type owns one byte of info->messages */
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

/*
 * Called from the platform IPI handler: atomically grab all pending
 * message flags with xchg() and dispatch each, looping until no new
 * messages were posted while the batch was being handled.
 */
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);

#ifdef __BIG_ENDIAN
		/* byte N of the big-endian int is bit 24 - 8*N upward */
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
			generic_smp_call_function_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
			scheduler_ipi();
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
			generic_smp_call_function_single_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
			debug_ipi_action(0, NULL);
#else
#error Unsupported ENDIAN
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
24023d72bfdSMilton Miller 
/*
 * Deliver IPI message @msg to @cpu: use the platform's dedicated
 * message_pass hook when present, else the muxed single-IPI path.
 */
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

/* Ask @cpu to run the scheduler; silently ignored before smp_ops exists */
void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

/* Arch back-end for smp_call_function_single(): IPI one CPU */
void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

/* Arch back-end for the function-call IPI: IPI every CPU in @mask */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
/* Send the debugger-break IPI to every online CPU except ourselves */
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
/*
 * Install @crash_ipi_callback (consumed by debug_ipi_action()) and
 * broadcast the break IPI so every other CPU enters it; the mb()
 * publishes the pointer before the IPIs go out.
 */
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif
296cc532915SMichael Ellerman 
/* IPI callback for smp_send_stop(): go offline and spin forever */
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

/* Halt all other CPUs (e.g. for shutdown); wait=0, so don't block on them */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* thread_info of the task to run (or running) on each CPU */
struct thread_info *current_set[NR_CPUS];

/* Record per-cpu hardware details (PVR; TLB geometry on FSL Book3E) */
static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
3225ad57078SPaul Mackerras 
3235ad57078SPaul Mackerras void __init smp_prepare_cpus(unsigned int max_cpus)
3245ad57078SPaul Mackerras {
3255ad57078SPaul Mackerras 	unsigned int cpu;
3265ad57078SPaul Mackerras 
3275ad57078SPaul Mackerras 	DBG("smp_prepare_cpus\n");
3285ad57078SPaul Mackerras 
3295ad57078SPaul Mackerras 	/*
3305ad57078SPaul Mackerras 	 * setup_cpu may need to be called on the boot cpu. We havent
3315ad57078SPaul Mackerras 	 * spun any cpus up but lets be paranoid.
3325ad57078SPaul Mackerras 	 */
3335ad57078SPaul Mackerras 	BUG_ON(boot_cpuid != smp_processor_id());
3345ad57078SPaul Mackerras 
3355ad57078SPaul Mackerras 	/* Fixup boot cpu */
3365ad57078SPaul Mackerras 	smp_store_cpu_info(boot_cpuid);
3375ad57078SPaul Mackerras 	cpu_callin_map[boot_cpuid] = 1;
3385ad57078SPaul Mackerras 
339cc1ba8eaSAnton Blanchard 	for_each_possible_cpu(cpu) {
340cc1ba8eaSAnton Blanchard 		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
341cc1ba8eaSAnton Blanchard 					GFP_KERNEL, cpu_to_node(cpu));
342cc1ba8eaSAnton Blanchard 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
343cc1ba8eaSAnton Blanchard 					GFP_KERNEL, cpu_to_node(cpu));
344cc1ba8eaSAnton Blanchard 	}
345cc1ba8eaSAnton Blanchard 
346cc1ba8eaSAnton Blanchard 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
347cc1ba8eaSAnton Blanchard 	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
348cc1ba8eaSAnton Blanchard 
3498cffc6acSBenjamin Herrenschmidt 	if (smp_ops)
350757cbd46SKumar Gala 		if (smp_ops->probe)
3515ad57078SPaul Mackerras 			max_cpus = smp_ops->probe();
3528cffc6acSBenjamin Herrenschmidt 		else
353757cbd46SKumar Gala 			max_cpus = NR_CPUS;
354757cbd46SKumar Gala 	else
3558cffc6acSBenjamin Herrenschmidt 		max_cpus = 1;
3565ad57078SPaul Mackerras }
3575ad57078SPaul Mackerras 
/* Final boot-cpu fixups before the secondaries are brought up */
void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	/* make 'current' reachable through the boot cpu's paca */
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}
3665ad57078SPaul Mackerras 
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Generic ->cpu_disable: mark the current CPU offline and migrate
 * its interrupts away.  The boot CPU cannot be unplugged (-EBUSY).
 */
int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

/*
 * Wait (100 x 100ms = up to ~10s) on a surviving CPU for the dying
 * CPU to report CPU_DEAD; complain if it never does.
 */
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

/*
 * Default "dead loop" run on the CPU being offlined: announce
 * CPU_DEAD and spin until cpu_state is flipped back to
 * CPU_UP_PREPARE (see smp_generic_kick_cpu()).
 */
void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

/* Mark @cpu dead on its behalf (for platforms that park CPUs elsewhere) */
void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/* True if @cpu has been asked to come back online after a soft unplug */
int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

/* Number of outstanding inhibit_secondary_onlining() requests */
static atomic_t secondary_inhibit_count;

/*
 * Don't allow secondary CPU threads to come online
 */
void inhibit_secondary_onlining(void)
{
	/*
	 * This makes secondary_inhibit_count stable during cpu
	 * online/offline operations.
	 */
	get_online_cpus();

	atomic_inc(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);

/*
 * Allow secondary CPU threads to come online again
 */
void uninhibit_secondary_onlining(void)
{
	get_online_cpus();
	atomic_dec(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);

/* Non-zero while at least one caller has inhibited secondary onlining */
static int secondaries_inhibited(void)
{
	return atomic_read(&secondary_inhibit_count);
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif
4605ad57078SPaul Mackerras 
/*
 * Point the secondary-startup plumbing (paca on ppc64, current_set
 * and secondary_ti) at @idle's thread_info/stack for CPU @cpu.
 */
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	/* initial kernel SP: top of the stack minus one frame of headroom */
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}
472c56e5853SBenjamin Herrenschmidt 
/*
 * Bring CPU @cpu online, running @tidle as its idle task.
 *
 * Returns 0 on success, -EBUSY when secondary threads are inhibited,
 * -EINVAL when the platform can't boot this cpu, -ENOENT when the
 * cpu never calls in, or the error from smp_ops->kick_cpu().
 */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu % threads_per_core != 0)
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure callin-map entry is 0 (can be leftover a CPU
	 * hotplug
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		/* busy-wait: 50000 x 100us = up to ~5s during early boot */
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
5435ad57078SPaul Mackerras 
544e9efed3bSNathan Lynch /* Return the value of the reg property corresponding to the given
545e9efed3bSNathan Lynch  * logical cpu.
546e9efed3bSNathan Lynch  */
547e9efed3bSNathan Lynch int cpu_to_core_id(int cpu)
548e9efed3bSNathan Lynch {
549e9efed3bSNathan Lynch 	struct device_node *np;
550e9efed3bSNathan Lynch 	const int *reg;
551e9efed3bSNathan Lynch 	int id = -1;
552e9efed3bSNathan Lynch 
553e9efed3bSNathan Lynch 	np = of_get_cpu_node(cpu, NULL);
554e9efed3bSNathan Lynch 	if (!np)
555e9efed3bSNathan Lynch 		goto out;
556e9efed3bSNathan Lynch 
557e9efed3bSNathan Lynch 	reg = of_get_property(np, "reg", NULL);
558e9efed3bSNathan Lynch 	if (!reg)
559e9efed3bSNathan Lynch 		goto out;
560e9efed3bSNathan Lynch 
561e9efed3bSNathan Lynch 	id = *reg;
562e9efed3bSNathan Lynch out:
563e9efed3bSNathan Lynch 	of_node_put(np);
564e9efed3bSNathan Lynch 	return id;
565e9efed3bSNathan Lynch }
566e9efed3bSNathan Lynch 
/* Helper routines for cpu to core mapping */
/* Logical core index containing thread @cpu */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

/* First (primary) thread of logical core @core */
int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
57999d86705SVaidyanathan Srinivasan 
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 *
 * Returns the device-tree node of @cpu's next-level cache (caller
 * must of_node_put() it), or NULL if the cpu is not present or no
 * cache node is found.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}
6015ad57078SPaul Mackerras 
/* Activate a secondary processor. */
/*
 * First C code run by a freshly-started secondary: set up its mm,
 * timebase and platform state, announce arrival to __cpu_up() via
 * cpu_callin_map, mark itself online, populate the sibling/core
 * topology masks and drop into the idle loop.  Never returns.
 */
void __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	/* borrow init_mm until a real user mm gets scheduled in */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	/* tell __cpu_up() we have arrived */
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	/* pair up with every online cpu sharing our L2 in cpu_core_map */
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	local_irq_enable();

	/* enter the idle loop; never returns */
	cpu_idle();

	BUG();
}
6665ad57078SPaul Mackerras 
/*
 * No-op stub: changing the profiling timer multiplier is not supported
 * on powerpc.  Always reports success (0) without touching any state.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
6715ad57078SPaul Mackerras 
6725ad57078SPaul Mackerras void __init smp_cpus_done(unsigned int max_cpus)
6735ad57078SPaul Mackerras {
674bfb9126dSAnton Blanchard 	cpumask_var_t old_mask;
6755ad57078SPaul Mackerras 
6765ad57078SPaul Mackerras 	/* We want the setup_cpu() here to be called from CPU 0, but our
6775ad57078SPaul Mackerras 	 * init thread may have been "borrowed" by another CPU in the meantime
6785ad57078SPaul Mackerras 	 * se we pin us down to CPU 0 for a short while
6795ad57078SPaul Mackerras 	 */
680bfb9126dSAnton Blanchard 	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
681104699c0SKOSAKI Motohiro 	cpumask_copy(old_mask, tsk_cpus_allowed(current));
68221dbeb91SJulia Lawall 	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
6835ad57078SPaul Mackerras 
684757cbd46SKumar Gala 	if (smp_ops && smp_ops->setup_cpu)
6855ad57078SPaul Mackerras 		smp_ops->setup_cpu(boot_cpuid);
6865ad57078SPaul Mackerras 
687bfb9126dSAnton Blanchard 	set_cpus_allowed_ptr(current, old_mask);
688bfb9126dSAnton Blanchard 
689bfb9126dSAnton Blanchard 	free_cpumask_var(old_mask);
6904b703a23SAnton Blanchard 
691d7294445SBenjamin Herrenschmidt 	if (smp_ops && smp_ops->bringup_done)
692d7294445SBenjamin Herrenschmidt 		smp_ops->bringup_done();
693d7294445SBenjamin Herrenschmidt 
6944b703a23SAnton Blanchard 	dump_numa_cpu_topology();
695d7294445SBenjamin Herrenschmidt 
6965ad57078SPaul Mackerras }
6975ad57078SPaul Mackerras 
698e1f0ece1SMichael Neuling int arch_sd_sibling_asym_packing(void)
699e1f0ece1SMichael Neuling {
700e1f0ece1SMichael Neuling 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
701e1f0ece1SMichael Neuling 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
702e1f0ece1SMichael Neuling 		return SD_ASYM_PACKING;
703e1f0ece1SMichael Neuling 	}
704e1f0ece1SMichael Neuling 	return 0;
705e1f0ece1SMichael Neuling }
706e1f0ece1SMichael Neuling 
7075ad57078SPaul Mackerras #ifdef CONFIG_HOTPLUG_CPU
7085ad57078SPaul Mackerras int __cpu_disable(void)
7095ad57078SPaul Mackerras {
710440a0857SNathan Lynch 	struct device_node *l2_cache;
711e2075f79SNathan Lynch 	int cpu = smp_processor_id();
712e2075f79SNathan Lynch 	int base, i;
713e2075f79SNathan Lynch 	int err;
7145ad57078SPaul Mackerras 
715e2075f79SNathan Lynch 	if (!smp_ops->cpu_disable)
7165ad57078SPaul Mackerras 		return -ENOSYS;
717e2075f79SNathan Lynch 
718e2075f79SNathan Lynch 	err = smp_ops->cpu_disable();
719e2075f79SNathan Lynch 	if (err)
720e2075f79SNathan Lynch 		return err;
721e2075f79SNathan Lynch 
722e2075f79SNathan Lynch 	/* Update sibling maps */
72399d86705SVaidyanathan Srinivasan 	base = cpu_first_thread_sibling(cpu);
724e2075f79SNathan Lynch 	for (i = 0; i < threads_per_core; i++) {
725cc1ba8eaSAnton Blanchard 		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
726cc1ba8eaSAnton Blanchard 		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
727cc1ba8eaSAnton Blanchard 		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
728cc1ba8eaSAnton Blanchard 		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
729e2075f79SNathan Lynch 	}
730e2075f79SNathan Lynch 
731440a0857SNathan Lynch 	l2_cache = cpu_to_l2cache(cpu);
732440a0857SNathan Lynch 	for_each_present_cpu(i) {
733440a0857SNathan Lynch 		struct device_node *np = cpu_to_l2cache(i);
734440a0857SNathan Lynch 		if (!np)
735440a0857SNathan Lynch 			continue;
736440a0857SNathan Lynch 		if (np == l2_cache) {
737cc1ba8eaSAnton Blanchard 			cpumask_clear_cpu(cpu, cpu_core_mask(i));
738cc1ba8eaSAnton Blanchard 			cpumask_clear_cpu(i, cpu_core_mask(cpu));
739440a0857SNathan Lynch 		}
740440a0857SNathan Lynch 		of_node_put(np);
741440a0857SNathan Lynch 	}
742440a0857SNathan Lynch 	of_node_put(l2_cache);
743440a0857SNathan Lynch 
744440a0857SNathan Lynch 
745e2075f79SNathan Lynch 	return 0;
7465ad57078SPaul Mackerras }
7475ad57078SPaul Mackerras 
7485ad57078SPaul Mackerras void __cpu_die(unsigned int cpu)
7495ad57078SPaul Mackerras {
7505ad57078SPaul Mackerras 	if (smp_ops->cpu_die)
7515ad57078SPaul Mackerras 		smp_ops->cpu_die(cpu);
7525ad57078SPaul Mackerras }
753d0174c72SNathan Fontenot 
754d0174c72SNathan Fontenot static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);
755d0174c72SNathan Fontenot 
756d0174c72SNathan Fontenot void cpu_hotplug_driver_lock()
757d0174c72SNathan Fontenot {
758d0174c72SNathan Fontenot 	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
759d0174c72SNathan Fontenot }
760d0174c72SNathan Fontenot 
761d0174c72SNathan Fontenot void cpu_hotplug_driver_unlock()
762d0174c72SNathan Fontenot {
763d0174c72SNathan Fontenot 	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
764d0174c72SNathan Fontenot }
765abb17f9cSMilton Miller 
/*
 * Entry point for a CPU going offline: hand control to the platform's
 * ppc_md.cpu_die() hook (which typically powers the CPU down and does
 * not return).  If the hook is absent or returns, fall back into the
 * secondary startup path so the CPU can be brought online again.
 */
void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}
774fa3f82c8SBenjamin Herrenschmidt 
7755ad57078SPaul Mackerras #endif
776