xref: /openbmc/linux/arch/arm/mach-exynos/platsmp.c (revision adc548d77c22daa371d5217b382a139b593dec47)
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include <mach/map.h>

#include "common.h"
#include "regs-pmu.h"

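/*
 * Secondary-CPU entry point, implemented in headsmp.S: the woken core spins
 * in the holding pen until pen_release matches its hardware ID.
 */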
extern void exynos4_secondary_startup(void);

/*
 * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
 * during hot-(un)plugging CPUx.
 *
 * The feature can be cleared safely during first boot of secondary CPU.
 *
 * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
 * down a CPU so the CPU idle clock down feature could properly detect global
 * idle state when CPUx is off.
 */
static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
{
	if (soc_is_exynos4()) {
		unsigned int tmp;

		tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
		if (enable)
			tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
		else
			tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
		pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

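	/*
	 * Undo what v7_exit_coherency_flush() did on the way down:
	 * re-enable the D-cache (SCTLR.C, the CR_C operand) and the SMP
	 * coherency bit in the Auxiliary Control Register (bit 6, the
	 * 0x40 operand).
	 */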
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");

	exynos_set_delayed_reset_assertion(core_id, false);
}

static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		/*
		 * Exynos4 SoCs require setting
		 * USE_DELAYED_RESET_ASSERTION so the CPU idle
		 * clock down feature could properly detect
		 * global idle state when CPUx is off.
		 */
		exynos_set_delayed_reset_assertion(core_id, true);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 */
void exynos_cpu_power_down(int cpu)
{
	if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") ||
		of_machine_is_compatible("samsung,exynos5800"))) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}
	pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 *
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
			S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 *
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}

static inline void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

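/*
 * Locate the boot-address register for a given CPU: Exynos4412 keeps one
 * word per core, Exynos5420/5800 use a single word at offset 4, and all
 * other SoCs use the base register returned by cpu_boot_reg_base().
 */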
static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Set wake up by local power mode and execute software reset for given core.
 *
 * Currently this is needed only when booting secondary CPU on Exynos3250.
 */
static void exynos_core_restart(u32 core_id)
{
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pr_info("CPU%u: Software reset\n", core_id);
	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}

static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}

static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		if (timeout == 0) {
			pr_err("cpu%u power enable failed\n", cpu);
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}

	exynos_core_restart(core_id);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = virt_to_phys(exynos4_secondary_startup);

		/*
		 * Try to set boot address using firmware first
		 * and fall back to boot register if it fails.
		 */
		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			goto fail;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg)) {
				ret = PTR_ERR(boot_reg);
				goto fail;
			}
			__raw_writel(boot_addr, boot_reg);
		}

		call_firmware_op(cpu_boot, core_id);

		arch_send_wakeup_ipi_mask(cpumask_of(cpu));

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/* No harm if this is called during first boot of secondary CPU */
	exynos_set_delayed_reset_assertion(core_id, false);

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return pen_release != -1 ? ret : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */

static void __init exynos_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
	else
		/*
		 * CPU nodes are passed through DT and the possible map
		 * is set up by arm_dt_init_cpu_maps().
		 */
		return;

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	exynos_sysram_init();

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
		u32 mpidr;
		u32 core_id;
		int ret;

		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			break;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg))
				break;
			__raw_writel(boot_addr, boot_reg);
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

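	/*
	 * Flush the cache hierarchy to the Level of Unification Inner
	 * Shareable and leave SMP coherency; cpu_leave_lowpower() restores
	 * the cache and coherency settings if the core wakes up again.
	 */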
	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};