xref: /openbmc/linux/arch/arm/mach-exynos/platsmp.c (revision 6f0b7c0c6faa76c32891ef1f7ee37c7e10aeb039)
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include <mach/map.h>

#include "common.h"
#include "regs-pmu.h"

extern void exynos4_secondary_startup(void);

#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

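	/*
	 * Turn the D-cache back on by setting the C bit in SCTLR
	 * (cp15 c1, c0, 0) and set bit 6 (0x40), the SMP bit, in the
	 * Auxiliary Control Register (cp15 c1, c0, 1) so this core
	 * rejoins SMP coherency after its low-power stay.
	 */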
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}

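/*
 * Park the dying CPU: program the PMU so the core is powered off at the
 * next WFI and spin there, counting spurious wakeups, until the boot CPU
 * writes this core's ID into pen_release to signal a real wakeup.
 */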
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 */
void exynos_cpu_power_down(int cpu)
{
	pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 *
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
			S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 *
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}

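/*
 * Location of the boot-address flag the secondary reads once released:
 * Exynos4210 Rev 1.1 uses the PMU INFORM5 register; other supported SoCs
 * use the SYSRAM region behind sysram_base_addr.
 */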
static inline void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

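/*
 * Per-CPU boot register: Exynos4412 provides one word per core (4 bytes
 * apart), Exynos5420/5800 use a single word at offset 4, and everything
 * else boots from the base address itself.
 */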
static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}

static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}

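/*
 * boot_lock serialises secondary bring-up: the boot CPU holds it for the
 * whole of exynos_boot_secondary(), and the incoming CPU briefly takes it
 * in exynos_secondary_init() so it cannot run ahead of the releasing CPU.
 */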
static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until the secondary core is powered on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		if (timeout == 0) {
			pr_err("cpu%u power enable failed\n", core_id);
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}
	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = virt_to_phys(exynos4_secondary_startup);

		/*
		 * Try to set boot address using firmware first
		 * and fall back to boot register if it fails.
		 */
		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			goto fail;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg)) {
				ret = PTR_ERR(boot_reg);
				goto fail;
			}
			__raw_writel(boot_addr, boot_reg);
		}

		call_firmware_op(cpu_boot, core_id);

		arch_send_wakeup_ipi_mask(cpumask_of(cpu));

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return pen_release != -1 ? ret : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */

static void __init exynos_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
	else
		/*
		 * CPU nodes are passed through DT and set_cpu_possible
		 * is set by "arm_dt_init_cpu_maps".
		 */
		return;

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

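	/*
	 * Set up the SYSRAM mapping (sysram_base_addr) first so that
	 * cpu_boot_reg() has a valid base before any secondary boot
	 * address is written below.
	 */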
	exynos_sysram_init();

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
		u32 mpidr;
		u32 core_id;
		int ret;

		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			break;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg))
				break;
			__raw_writel(boot_addr, boot_reg);
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
static void __ref exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;

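	/*
	 * Flush and disable the local data cache (to the Level of
	 * Unification Inner Shareable) and drop out of SMP coherency
	 * before the core is powered down.
	 */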
	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

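/*
 * SMP operations handed to the ARM SMP core for the Exynos platforms;
 * the cpu_die hook is only provided when CPU hotplug support is built in.
 */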
struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};