xref: /openbmc/linux/arch/powerpc/kernel/smp.c (revision e17769eb8c897101e2c6df62ec397e450b6e53b4)
15ad57078SPaul Mackerras /*
25ad57078SPaul Mackerras  * SMP support for ppc.
35ad57078SPaul Mackerras  *
45ad57078SPaul Mackerras  * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
55ad57078SPaul Mackerras  * deal of code from the sparc and intel versions.
65ad57078SPaul Mackerras  *
75ad57078SPaul Mackerras  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
85ad57078SPaul Mackerras  *
95ad57078SPaul Mackerras  * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
105ad57078SPaul Mackerras  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
115ad57078SPaul Mackerras  *
125ad57078SPaul Mackerras  *      This program is free software; you can redistribute it and/or
135ad57078SPaul Mackerras  *      modify it under the terms of the GNU General Public License
145ad57078SPaul Mackerras  *      as published by the Free Software Foundation; either version
155ad57078SPaul Mackerras  *      2 of the License, or (at your option) any later version.
165ad57078SPaul Mackerras  */
175ad57078SPaul Mackerras 
185ad57078SPaul Mackerras #undef DEBUG
195ad57078SPaul Mackerras 
205ad57078SPaul Mackerras #include <linux/kernel.h>
214b16f8e2SPaul Gortmaker #include <linux/export.h>
225ad57078SPaul Mackerras #include <linux/sched.h>
235ad57078SPaul Mackerras #include <linux/smp.h>
245ad57078SPaul Mackerras #include <linux/interrupt.h>
255ad57078SPaul Mackerras #include <linux/delay.h>
265ad57078SPaul Mackerras #include <linux/init.h>
275ad57078SPaul Mackerras #include <linux/spinlock.h>
285ad57078SPaul Mackerras #include <linux/cache.h>
295ad57078SPaul Mackerras #include <linux/err.h>
308a25a2fdSKay Sievers #include <linux/device.h>
315ad57078SPaul Mackerras #include <linux/cpu.h>
325ad57078SPaul Mackerras #include <linux/notifier.h>
334b703a23SAnton Blanchard #include <linux/topology.h>
345ad57078SPaul Mackerras 
355ad57078SPaul Mackerras #include <asm/ptrace.h>
3660063497SArun Sharma #include <linux/atomic.h>
375ad57078SPaul Mackerras #include <asm/irq.h>
381b67bee1SSrivatsa S. Bhat #include <asm/hw_irq.h>
39441c19c8SMichael Ellerman #include <asm/kvm_ppc.h>
405ad57078SPaul Mackerras #include <asm/page.h>
415ad57078SPaul Mackerras #include <asm/pgtable.h>
425ad57078SPaul Mackerras #include <asm/prom.h>
435ad57078SPaul Mackerras #include <asm/smp.h>
445ad57078SPaul Mackerras #include <asm/time.h>
455ad57078SPaul Mackerras #include <asm/machdep.h>
46e2075f79SNathan Lynch #include <asm/cputhreads.h>
475ad57078SPaul Mackerras #include <asm/cputable.h>
485ad57078SPaul Mackerras #include <asm/mpic.h>
49a7f290daSBenjamin Herrenschmidt #include <asm/vdso_datapage.h>
505ad57078SPaul Mackerras #ifdef CONFIG_PPC64
515ad57078SPaul Mackerras #include <asm/paca.h>
525ad57078SPaul Mackerras #endif
5318ad51ddSAnton Blanchard #include <asm/vdso.h>
54ae3a197eSDavid Howells #include <asm/debug.h>
551217d34bSAnton Blanchard #include <asm/kexec.h>
565ad57078SPaul Mackerras 
575ad57078SPaul Mackerras #ifdef DEBUG
58f9e4ec57SMichael Ellerman #include <asm/udbg.h>
595ad57078SPaul Mackerras #define DBG(fmt...) udbg_printf(fmt)
605ad57078SPaul Mackerras #else
615ad57078SPaul Mackerras #define DBG(fmt...)
625ad57078SPaul Mackerras #endif
635ad57078SPaul Mackerras 
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

/* Thread info handed to the secondary being started; set in cpu_idle_thread_init() */
struct thread_info *secondary_ti;

/* Per-cpu topology masks: hardware threads sharing a core, and cpus on the same core/chip */
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

/* Max threads per core allowed to start at boot; consulted by smp_generic_cpu_bootable() */
int smt_enabled_at_boot = 1;

/* Installed by crash_send_ipi(); invoked from debug_ipi_action() when set */
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
86cc532915SMichael Ellerman 
873cd85250SAndy Fleming /*
883cd85250SAndy Fleming  * Returns 1 if the specified cpu should be brought up during boot.
893cd85250SAndy Fleming  * Used to inhibit booting threads if they've been disabled or
903cd85250SAndy Fleming  * limited on the command line
913cd85250SAndy Fleming  */
923cd85250SAndy Fleming int smp_generic_cpu_bootable(unsigned int nr)
933cd85250SAndy Fleming {
943cd85250SAndy Fleming 	/* Special case - we inhibit secondary thread startup
953cd85250SAndy Fleming 	 * during boot if the user requests it.
963cd85250SAndy Fleming 	 */
973cd85250SAndy Fleming 	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
983cd85250SAndy Fleming 		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
993cd85250SAndy Fleming 			return 0;
1003cd85250SAndy Fleming 		if (smt_enabled_at_boot
1013cd85250SAndy Fleming 		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
1023cd85250SAndy Fleming 			return 0;
1033cd85250SAndy Fleming 	}
1043cd85250SAndy Fleming 
1053cd85250SAndy Fleming 	return 1;
1063cd85250SAndy Fleming }
1073cd85250SAndy Fleming 
1083cd85250SAndy Fleming 
#ifdef CONFIG_PPC64
/*
 * Release secondary cpu @nr from its spin loop by setting its paca
 * cpu_start flag; if the flag is already set the cpu was presumably
 * soft-unplugged, so poke it awake with a reschedule IPI instead.
 */
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	if (paca[nr].cpu_start) {
#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * Ok it's not spinning on cpu_start, so it might be
		 * soft-unplugged; let's try to bring it back.
		 */
		generic_set_cpu_up(nr);
		smp_wmb();
		smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */
		return 0;
	}

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();

	return 0;
}
#endif /* CONFIG_PPC64 */
1385ad57078SPaul Mackerras 
/* IPI handler: run the generic cross-cpu function-call queue */
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}
14425ddd738SMilton Miller 
/* IPI handler: enter the scheduler's resched-IPI path */
static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
15025ddd738SMilton Miller 
/* IPI handler: deliver a broadcast timer tick to this cpu */
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}
15625ddd738SMilton Miller 
/*
 * Debugger-break IPI handler.  If a crash callback has been installed
 * via crash_send_ipi() (kexec path) run it; otherwise fall through to
 * the debugger, when one is configured.
 */
static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}
17025ddd738SMilton Miller 
/* Handler table, indexed by message number, for one-irq-per-message controllers */
static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

/* Matching irq names, passed to request_irq() in smp_request_message_ipi() */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};
18425ddd738SMilton Miller 
18525ddd738SMilton Miller /* optional function to request ipi, for controllers with >= 4 ipis */
18625ddd738SMilton Miller int smp_request_message_ipi(int virq, int msg)
18725ddd738SMilton Miller {
18825ddd738SMilton Miller 	int err;
18925ddd738SMilton Miller 
19025ddd738SMilton Miller 	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
19125ddd738SMilton Miller 		return -EINVAL;
19225ddd738SMilton Miller 	}
19325ddd738SMilton Miller #if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
19425ddd738SMilton Miller 	if (msg == PPC_MSG_DEBUGGER_BREAK) {
19525ddd738SMilton Miller 		return 1;
19625ddd738SMilton Miller 	}
19725ddd738SMilton Miller #endif
1983b5e16d7SThomas Gleixner 	err = request_irq(virq, smp_ipi_action[msg],
199e6651de9SZhao Chenhui 			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
200b0d436c7SAnton Blanchard 			  smp_ipi_name[msg], NULL);
20125ddd738SMilton Miller 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
20225ddd738SMilton Miller 		virq, smp_ipi_name[msg], err);
20325ddd738SMilton Miller 
20425ddd738SMilton Miller 	return err;
20525ddd738SMilton Miller }
20625ddd738SMilton Miller 
#ifdef CONFIG_PPC_SMP_MUXED_IPI
/* Per-cpu mailbox used when the platform muxes all IPI messages onto one irq */
struct cpu_messages {
	long messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
21323d72bfdSMilton Miller 
/* Stash the controller cookie later passed to smp_ops->cause_ipi() for @cpu */
void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}
22023d72bfdSMilton Miller 
/* Mark message @msg pending for @cpu; each message owns one byte of 'messages' */
void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}
23231639c77SSuresh Warrier 
/* Post message @msg to @cpu, then fire the platform's cause_ipi hook */
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	smp_muxed_ipi_set_message(cpu, msg);
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}
24423d72bfdSMilton Miller 
/*
 * Bit of message @A within the packed 'messages' word; must match the
 * byte-indexed store in smp_muxed_ipi_set_message() on either endianness.
 */
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
2500654de1cSAnton Blanchard 
/*
 * Demultiplex a muxed IPI: atomically read-and-clear this cpu's pending
 * message word and dispatch every handler whose bit was set, looping
 * until no new messages arrive.
 */
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
	unsigned long all;

	mb();	/* order any irq clear */

	do {
		/* Grab and clear all pending message bytes in one shot */
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
		/* Re-scan: messages may have been posted while we dispatched */
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
28423d72bfdSMilton Miller 
/*
 * Send IPI message @msg to @cpu: prefer the platform's dedicated
 * message_pass() op, otherwise fall back to the muxed-IPI path.
 */
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
2949ca980dcSPaul Mackerras 
2955ad57078SPaul Mackerras void smp_send_reschedule(int cpu)
2965ad57078SPaul Mackerras {
2978cffc6acSBenjamin Herrenschmidt 	if (likely(smp_ops))
2989ca980dcSPaul Mackerras 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
2995ad57078SPaul Mackerras }
300de56a948SPaul Mackerras EXPORT_SYMBOL_GPL(smp_send_reschedule);
3015ad57078SPaul Mackerras 
/* Arch hook: deliver a call-function IPI to a single cpu */
void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
306b7d7a240SJens Axboe 
307f063ea02SRusty Russell void arch_send_call_function_ipi_mask(const struct cpumask *mask)
308b7d7a240SJens Axboe {
309b7d7a240SJens Axboe 	unsigned int cpu;
310b7d7a240SJens Axboe 
311f063ea02SRusty Russell 	for_each_cpu(cpu, mask)
3129ca980dcSPaul Mackerras 		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
313b7d7a240SJens Axboe }
314b7d7a240SJens Axboe 
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/* Deliver the tick-broadcast IPI to every cpu in @mask */
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int i;

	for_each_cpu(i, mask)
		do_message_pass(i, PPC_MSG_TICK_BROADCAST);
}
#endif
3241b67bee1SSrivatsa S. Bhat 
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
/* Send the debugger-break IPI to every online cpu except ourselves */
void smp_send_debugger_break(void)
{
	int self = raw_smp_processor_id();
	int i;

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(i) {
		if (i == self)
			continue;
		do_message_pass(i, PPC_MSG_DEBUGGER_BREAK);
	}
}
#endif
3395ad57078SPaul Mackerras 
#ifdef CONFIG_KEXEC
/*
 * Install @crash_ipi_callback and trigger it on all other cpus via the
 * debugger-break IPI.  The mb() makes the pointer store visible before
 * any cpu can take the IPI.
 */
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif
350cc532915SMichael Ellerman 
/* IPI callback: mark this cpu offline, then spin forever with irqs off */
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}
3605ad57078SPaul Mackerras 
/* Ask every other cpu to run stop_this_cpu(); final 0 = don't wait for them */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
3655ad57078SPaul Mackerras 
/* Per-cpu idle thread_info; filled in by smp_prepare_boot_cpu()/cpu_idle_thread_init() */
struct thread_info *current_set[NR_CPUS];
3675ad57078SPaul Mackerras 
/* Record per-cpu hardware details: PVR, and on FSL Book3E the TLB1 entry count */
static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
3765ad57078SPaul Mackerras 
/*
 * Prepare for SMP bringup: record boot-cpu info, allocate the per-cpu
 * sibling/core cpumasks, seed per-cpu NUMA bindings, and let the
 * platform probe its IPI mechanism.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We havent
	 * spun any cpus up but lets be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		/* Masks are node-local to the cpu they describe */
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	/* The boot cpu is trivially its own sibling and core-mate */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}
4145ad57078SPaul Mackerras 
/* Early fixup for the boot cpu: current-task pointer, NUMA node, idle slot */
void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}
4245ad57078SPaul Mackerras 
4255ad57078SPaul Mackerras #ifdef CONFIG_HOTPLUG_CPU
4265ad57078SPaul Mackerras 
/*
 * Take the current cpu out of the online map in preparation for
 * offlining it.  The boot cpu can never be disabled.
 */
int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;	/* keep userspace vDSO view in sync */
#endif
	migrate_irqs();		/* steer irqs away from the departing cpu */
	return 0;
}
4415ad57078SPaul Mackerras 
4425ad57078SPaul Mackerras void generic_cpu_die(unsigned int cpu)
4435ad57078SPaul Mackerras {
4445ad57078SPaul Mackerras 	int i;
4455ad57078SPaul Mackerras 
4465ad57078SPaul Mackerras 	for (i = 0; i < 100; i++) {
4475ad57078SPaul Mackerras 		smp_rmb();
4485ad57078SPaul Mackerras 		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
4495ad57078SPaul Mackerras 			return;
4505ad57078SPaul Mackerras 		msleep(100);
4515ad57078SPaul Mackerras 	}
4525ad57078SPaul Mackerras 	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
4535ad57078SPaul Mackerras }
4545ad57078SPaul Mackerras 
/* Mark @cpu dead; generic_cpu_die() polls for this state */
void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}
459fb82b839SBenjamin Herrenschmidt 
/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}
469ae5cab47SZhao Chenhui 
/* True once @cpu has been re-kicked (state back to CPU_UP_PREPARE) */
int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
474512691d4SPaul Mackerras 
/*
 * Secondary-thread bringup is inhibited while KVM HV mode is active;
 * __cpu_up() refuses subcore threads in that case.
 */
static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif
4855ad57078SPaul Mackerras 
/*
 * Point the secondary-startup state at @idle's thread_info/stack so
 * cpu @cpu resumes into its idle task.
 */
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	/* kstack = top of the idle thread's stack, minus the frame overhead */
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}
497c56e5853SBenjamin Herrenschmidt 
/*
 * Bring cpu @cpu online running idle task @tidle.
 *
 * Returns 0 on success, -EBUSY if secondary threads are currently
 * inhibited, -EINVAL if the platform cannot boot this cpu, the
 * kick_cpu() error code if the kick fails, or -ENOENT if the cpu
 * never calls in.
 */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure callin-map entry is 0 (can be leftover a CPU
	 * hotplug
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		/* Busy-wait up to ~5s; we can't sleep this early in boot */
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu) || !cpu_active(cpu))
		cpu_relax();

	return 0;
}
5685ad57078SPaul Mackerras 
569e9efed3bSNathan Lynch /* Return the value of the reg property corresponding to the given
570e9efed3bSNathan Lynch  * logical cpu.
571e9efed3bSNathan Lynch  */
572e9efed3bSNathan Lynch int cpu_to_core_id(int cpu)
573e9efed3bSNathan Lynch {
574e9efed3bSNathan Lynch 	struct device_node *np;
575f8a1883aSAnton Blanchard 	const __be32 *reg;
576e9efed3bSNathan Lynch 	int id = -1;
577e9efed3bSNathan Lynch 
578e9efed3bSNathan Lynch 	np = of_get_cpu_node(cpu, NULL);
579e9efed3bSNathan Lynch 	if (!np)
580e9efed3bSNathan Lynch 		goto out;
581e9efed3bSNathan Lynch 
582e9efed3bSNathan Lynch 	reg = of_get_property(np, "reg", NULL);
583e9efed3bSNathan Lynch 	if (!reg)
584e9efed3bSNathan Lynch 		goto out;
585e9efed3bSNathan Lynch 
586f8a1883aSAnton Blanchard 	id = be32_to_cpup(reg);
587e9efed3bSNathan Lynch out:
588e9efed3bSNathan Lynch 	of_node_put(np);
589e9efed3bSNathan Lynch 	return id;
590e9efed3bSNathan Lynch }
591e9efed3bSNathan Lynch 
/* Helper routines for cpu to core mapping */
/* Core index containing hardware thread @cpu */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
59899d86705SVaidyanathan Srinivasan 
/* First (thread-0) cpu number of core @core; inverse of cpu_core_index_of_thread() */
int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
60499d86705SVaidyanathan Srinivasan 
/*
 * Add (or remove) @cpu to/from the cpu_core_mask of every cpu whose
 * "ibm,chip-id" device-tree property equals @chipid.  Adding scans
 * online cpus; removing scans present cpus.
 */
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			/* Core masks are kept symmetric in both directions */
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}
631256f2d4bSPaul Mackerras 
632104699c0SKOSAKI Motohiro /* Must be called when no change can occur to cpu_present_mask,
633440a0857SNathan Lynch  * i.e. during cpu online or offline.
634440a0857SNathan Lynch  */
635440a0857SNathan Lynch static struct device_node *cpu_to_l2cache(int cpu)
636440a0857SNathan Lynch {
637440a0857SNathan Lynch 	struct device_node *np;
638b2ea25b9SNathan Lynch 	struct device_node *cache;
639440a0857SNathan Lynch 
640440a0857SNathan Lynch 	if (!cpu_present(cpu))
641440a0857SNathan Lynch 		return NULL;
642440a0857SNathan Lynch 
643440a0857SNathan Lynch 	np = of_get_cpu_node(cpu, NULL);
644440a0857SNathan Lynch 	if (np == NULL)
645440a0857SNathan Lynch 		return NULL;
646440a0857SNathan Lynch 
647b2ea25b9SNathan Lynch 	cache = of_find_next_cache_node(np);
648b2ea25b9SNathan Lynch 
649440a0857SNathan Lynch 	of_node_put(np);
650440a0857SNathan Lynch 
651b2ea25b9SNathan Lynch 	return cache;
652440a0857SNathan Lynch }
6535ad57078SPaul Mackerras 
654a8a5356cSPaul Mackerras static void traverse_core_siblings(int cpu, bool add)
655a8a5356cSPaul Mackerras {
656256f2d4bSPaul Mackerras 	struct device_node *l2_cache, *np;
657a8a5356cSPaul Mackerras 	const struct cpumask *mask;
658256f2d4bSPaul Mackerras 	int i, chip, plen;
659256f2d4bSPaul Mackerras 	const __be32 *prop;
660256f2d4bSPaul Mackerras 
661256f2d4bSPaul Mackerras 	/* First see if we have ibm,chip-id properties in cpu nodes */
662256f2d4bSPaul Mackerras 	np = of_get_cpu_node(cpu, NULL);
663256f2d4bSPaul Mackerras 	if (np) {
664256f2d4bSPaul Mackerras 		chip = -1;
665256f2d4bSPaul Mackerras 		prop = of_get_property(np, "ibm,chip-id", &plen);
666256f2d4bSPaul Mackerras 		if (prop && plen == sizeof(int))
667256f2d4bSPaul Mackerras 			chip = of_read_number(prop, 1);
668256f2d4bSPaul Mackerras 		of_node_put(np);
669256f2d4bSPaul Mackerras 		if (chip >= 0) {
670256f2d4bSPaul Mackerras 			traverse_siblings_chip_id(cpu, add, chip);
671256f2d4bSPaul Mackerras 			return;
672256f2d4bSPaul Mackerras 		}
673256f2d4bSPaul Mackerras 	}
674a8a5356cSPaul Mackerras 
675a8a5356cSPaul Mackerras 	l2_cache = cpu_to_l2cache(cpu);
676a8a5356cSPaul Mackerras 	mask = add ? cpu_online_mask : cpu_present_mask;
677a8a5356cSPaul Mackerras 	for_each_cpu(i, mask) {
678256f2d4bSPaul Mackerras 		np = cpu_to_l2cache(i);
679a8a5356cSPaul Mackerras 		if (!np)
680a8a5356cSPaul Mackerras 			continue;
681a8a5356cSPaul Mackerras 		if (np == l2_cache) {
682a8a5356cSPaul Mackerras 			if (add) {
683a8a5356cSPaul Mackerras 				cpumask_set_cpu(cpu, cpu_core_mask(i));
684a8a5356cSPaul Mackerras 				cpumask_set_cpu(i, cpu_core_mask(cpu));
685a8a5356cSPaul Mackerras 			} else {
686a8a5356cSPaul Mackerras 				cpumask_clear_cpu(cpu, cpu_core_mask(i));
687a8a5356cSPaul Mackerras 				cpumask_clear_cpu(i, cpu_core_mask(cpu));
688a8a5356cSPaul Mackerras 			}
689a8a5356cSPaul Mackerras 		}
690a8a5356cSPaul Mackerras 		of_node_put(np);
691a8a5356cSPaul Mackerras 	}
692a8a5356cSPaul Mackerras 	of_node_put(l2_cache);
693a8a5356cSPaul Mackerras }
694a8a5356cSPaul Mackerras 
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	/* Run on init_mm; take a reference so it is never torn down */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	/* Tell the boot cpu (spinning in the bring-up path) we arrived */
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		/* Skip offline sibling threads (but always handle ourself) */
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	/* Make the mask/NUMA updates above visible before going online */
	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	/* Enter the idle loop; does not return */
	cpu_startup_entry(CPUHP_ONLINE);

	BUG();
}
7525ad57078SPaul Mackerras 
/*
 * Profiling-timer reprogramming is not supported on powerpc; accept
 * any multiplier and report success.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
7575ad57078SPaul Mackerras 
#ifdef CONFIG_SCHED_SMT
/* Scheduler flags for the SMT level; handles asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (!cpu_has_feature(CPU_FTR_ASYM_SMT))
		return flags;

	/* Asymmetric SMT: prefer packing tasks onto lower thread ids */
	printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
	return flags | SD_ASYM_PACKING;
}
#endif
771607b45e9SVincent Guittot 
/* Scheduler topology levels for powerpc: SMT siblings (if configured),
 * then the whole die.
 */
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },	/* terminator */
};
779607b45e9SVincent Guittot 
/*
 * Called by the generic SMP code once all cpus have been brought up;
 * finishes boot-cpu setup and publishes the scheduler topology.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * so we pin us down to CPU 0 for a short while
	 */
	/* NOTE(review): alloc_cpumask_var() result is unchecked; with
	 * CONFIG_CPUMASK_OFFSTACK a failed allocation would be used below.
	 * Confirm this early-boot GFP_NOWAIT allocation cannot fail, or
	 * add a check.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	/* Restore the init thread's original affinity */
	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);

}
807e1f0ece1SMichael Neuling 
8085ad57078SPaul Mackerras #ifdef CONFIG_HOTPLUG_CPU
8095ad57078SPaul Mackerras int __cpu_disable(void)
8105ad57078SPaul Mackerras {
811e2075f79SNathan Lynch 	int cpu = smp_processor_id();
812e2075f79SNathan Lynch 	int base, i;
813e2075f79SNathan Lynch 	int err;
8145ad57078SPaul Mackerras 
815e2075f79SNathan Lynch 	if (!smp_ops->cpu_disable)
8165ad57078SPaul Mackerras 		return -ENOSYS;
817e2075f79SNathan Lynch 
818e2075f79SNathan Lynch 	err = smp_ops->cpu_disable();
819e2075f79SNathan Lynch 	if (err)
820e2075f79SNathan Lynch 		return err;
821e2075f79SNathan Lynch 
822e2075f79SNathan Lynch 	/* Update sibling maps */
82399d86705SVaidyanathan Srinivasan 	base = cpu_first_thread_sibling(cpu);
824e2075f79SNathan Lynch 	for (i = 0; i < threads_per_core; i++) {
825cc1ba8eaSAnton Blanchard 		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
826cc1ba8eaSAnton Blanchard 		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
827cc1ba8eaSAnton Blanchard 		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
828cc1ba8eaSAnton Blanchard 		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
829e2075f79SNathan Lynch 	}
830a8a5356cSPaul Mackerras 	traverse_core_siblings(cpu, false);
831440a0857SNathan Lynch 
832e2075f79SNathan Lynch 	return 0;
8335ad57078SPaul Mackerras }
8345ad57078SPaul Mackerras 
8355ad57078SPaul Mackerras void __cpu_die(unsigned int cpu)
8365ad57078SPaul Mackerras {
8375ad57078SPaul Mackerras 	if (smp_ops->cpu_die)
8385ad57078SPaul Mackerras 		smp_ops->cpu_die(cpu);
8395ad57078SPaul Mackerras }
840d0174c72SNathan Fontenot 
841abb17f9cSMilton Miller void cpu_die(void)
842abb17f9cSMilton Miller {
843abb17f9cSMilton Miller 	if (ppc_md.cpu_die)
844abb17f9cSMilton Miller 		ppc_md.cpu_die();
845fa3f82c8SBenjamin Herrenschmidt 
846fa3f82c8SBenjamin Herrenschmidt 	/* If we return, we re-enter start_secondary */
847fa3f82c8SBenjamin Herrenschmidt 	start_secondary_resume();
848abb17f9cSMilton Miller }
849fa3f82c8SBenjamin Herrenschmidt 
8505ad57078SPaul Mackerras #endif
851