xref: /openbmc/linux/arch/arm/mach-exynos/platsmp.c (revision 13cfa6c4f7facfc690ba9e99ec382c151fddaced)
14552386aSPankaj Dubey  /*
283014579SKukjin Kim  * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
383014579SKukjin Kim  *		http://www.samsung.com
483014579SKukjin Kim  *
583014579SKukjin Kim  * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
683014579SKukjin Kim  *
783014579SKukjin Kim  *  Copyright (C) 2002 ARM Ltd.
883014579SKukjin Kim  *  All Rights Reserved
983014579SKukjin Kim  *
1083014579SKukjin Kim  * This program is free software; you can redistribute it and/or modify
1183014579SKukjin Kim  * it under the terms of the GNU General Public License version 2 as
1283014579SKukjin Kim  * published by the Free Software Foundation.
1383014579SKukjin Kim */
1483014579SKukjin Kim 
1583014579SKukjin Kim #include <linux/init.h>
1683014579SKukjin Kim #include <linux/errno.h>
1783014579SKukjin Kim #include <linux/delay.h>
1883014579SKukjin Kim #include <linux/device.h>
1983014579SKukjin Kim #include <linux/jiffies.h>
2083014579SKukjin Kim #include <linux/smp.h>
2183014579SKukjin Kim #include <linux/io.h>
22b3205deaSSachin Kamat #include <linux/of_address.h>
2383014579SKukjin Kim 
2483014579SKukjin Kim #include <asm/cacheflush.h>
256f0b7c0cSKrzysztof Kozlowski #include <asm/cp15.h>
26eb50439bSWill Deacon #include <asm/smp_plat.h>
2783014579SKukjin Kim #include <asm/smp_scu.h>
28beddf63fSTomasz Figa #include <asm/firmware.h>
2983014579SKukjin Kim 
302e94ac42SPankaj Dubey #include <mach/map.h>
312e94ac42SPankaj Dubey 
3206853ae4SMarc Zyngier #include "common.h"
3365c9a853SKukjin Kim #include "regs-pmu.h"
3406853ae4SMarc Zyngier 
3583014579SKukjin Kim extern void exynos4_secondary_startup(void);
3683014579SKukjin Kim 
37*13cfa6c4SKrzysztof Kozlowski /*
38*13cfa6c4SKrzysztof Kozlowski  * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
39*13cfa6c4SKrzysztof Kozlowski  * during hot-(un)plugging CPUx.
40*13cfa6c4SKrzysztof Kozlowski  *
41*13cfa6c4SKrzysztof Kozlowski  * The feature can be cleared safely during first boot of secondary CPU.
42*13cfa6c4SKrzysztof Kozlowski  *
43*13cfa6c4SKrzysztof Kozlowski  * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
44*13cfa6c4SKrzysztof Kozlowski  * down a CPU so the CPU idle clock down feature could properly detect global
45*13cfa6c4SKrzysztof Kozlowski  * idle state when CPUx is off.
46*13cfa6c4SKrzysztof Kozlowski  */
47*13cfa6c4SKrzysztof Kozlowski static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
48*13cfa6c4SKrzysztof Kozlowski {
49*13cfa6c4SKrzysztof Kozlowski 	if (soc_is_exynos4()) {
50*13cfa6c4SKrzysztof Kozlowski 		unsigned int tmp;
51*13cfa6c4SKrzysztof Kozlowski 
52*13cfa6c4SKrzysztof Kozlowski 		tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
53*13cfa6c4SKrzysztof Kozlowski 		if (enable)
54*13cfa6c4SKrzysztof Kozlowski 			tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
55*13cfa6c4SKrzysztof Kozlowski 		else
56*13cfa6c4SKrzysztof Kozlowski 			tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
57*13cfa6c4SKrzysztof Kozlowski 		pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
58*13cfa6c4SKrzysztof Kozlowski 	}
59*13cfa6c4SKrzysztof Kozlowski }
60*13cfa6c4SKrzysztof Kozlowski 
616f0b7c0cSKrzysztof Kozlowski #ifdef CONFIG_HOTPLUG_CPU
/*
 * Undo the power-down preparation after wakeup: re-enable the data cache
 * and restore the auxiliary control bit cleared on the way down, then
 * clear the delayed reset assertion option again.
 */
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"	/* read SCTLR */
	"	orr	%0, %0, %1\n"		/* set CR_C: enable the D-cache */
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"	/* read ACTLR */
	"	orr	%0, %0, %2\n"	/* 0x40: presumably ACTLR.SMP (bit 6) to re-join coherency - confirm per TRM */
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");

	 exynos_set_delayed_reset_assertion(core_id, false);
}
796f0b7c0cSKrzysztof Kozlowski 
/*
 * Park the dying CPU in a power-down/WFI loop until the boot CPU releases
 * it again by writing its core ID into pen_release.
 *
 * @cpu:      Linux logical CPU number of the CPU going down
 * @spurious: incremented for every wakeup that was not a proper release
 */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		/*
		 * Exynos4 SoCs require setting
		 * USE_DELAYED_RESET_ASSERTION so the CPU idle
		 * clock down feature could properly detect
		 * global idle state when CPUx is off.
		 */
		exynos_set_delayed_reset_assertion(core_id, true);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
1176f0b7c0cSKrzysztof Kozlowski #endif /* CONFIG_HOTPLUG_CPU */
1186f0b7c0cSKrzysztof Kozlowski 
/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 */
void exynos_cpu_power_down(int cpu)
{
	/* Clearing the PMU configuration powers the core off on its next WFI. */
	pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
1317310d99fSKrzysztof Kozlowski 
/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	/* Request local power-on through the PMU core configuration register. */
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
1437310d99fSKrzysztof Kozlowski 
1447310d99fSKrzysztof Kozlowski /**
1457310d99fSKrzysztof Kozlowski  * exynos_cpu_power_state : returns the power state of the cpu
1467310d99fSKrzysztof Kozlowski  * @cpu : the cpu to retrieve the power state from
1477310d99fSKrzysztof Kozlowski  *
1487310d99fSKrzysztof Kozlowski  */
1497310d99fSKrzysztof Kozlowski int exynos_cpu_power_state(int cpu)
1507310d99fSKrzysztof Kozlowski {
151944483d0SArnd Bergmann 	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
1527310d99fSKrzysztof Kozlowski 			S5P_CORE_LOCAL_PWR_EN);
1537310d99fSKrzysztof Kozlowski }
1547310d99fSKrzysztof Kozlowski 
/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	/* Clearing the common configuration powers the whole cluster down. */
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}
1637310d99fSKrzysztof Kozlowski 
/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	/* Request cluster power-on through the PMU common configuration. */
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}
1737310d99fSKrzysztof Kozlowski 
1747310d99fSKrzysztof Kozlowski /**
1757310d99fSKrzysztof Kozlowski  * exynos_cluster_power_state : returns the power state of the cluster
1767310d99fSKrzysztof Kozlowski  * @cluster : the cluster to retrieve the power state from
1777310d99fSKrzysztof Kozlowski  *
1787310d99fSKrzysztof Kozlowski  */
1797310d99fSKrzysztof Kozlowski int exynos_cluster_power_state(int cluster)
1807310d99fSKrzysztof Kozlowski {
181944483d0SArnd Bergmann 	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
1827310d99fSKrzysztof Kozlowski 		S5P_CORE_LOCAL_PWR_EN);
1837310d99fSKrzysztof Kozlowski }
1847310d99fSKrzysztof Kozlowski 
1851f054f52STomasz Figa static inline void __iomem *cpu_boot_reg_base(void)
1861f054f52STomasz Figa {
1871f054f52STomasz Figa 	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
1882e94ac42SPankaj Dubey 		return pmu_base_addr + S5P_INFORM5;
189b3205deaSSachin Kamat 	return sysram_base_addr;
1901f054f52STomasz Figa }
1911f054f52STomasz Figa 
1921f054f52STomasz Figa static inline void __iomem *cpu_boot_reg(int cpu)
1931f054f52STomasz Figa {
1941f054f52STomasz Figa 	void __iomem *boot_reg;
1951f054f52STomasz Figa 
1961f054f52STomasz Figa 	boot_reg = cpu_boot_reg_base();
197b3205deaSSachin Kamat 	if (!boot_reg)
198b3205deaSSachin Kamat 		return ERR_PTR(-ENODEV);
1991f054f52STomasz Figa 	if (soc_is_exynos4412())
2001f054f52STomasz Figa 		boot_reg += 4*cpu;
20186c6f148SArun Kumar K 	else if (soc_is_exynos5420() || soc_is_exynos5800())
2021580be3dSChander Kashyap 		boot_reg += 4;
2031f054f52STomasz Figa 	return boot_reg;
2041f054f52STomasz Figa }
20583014579SKukjin Kim 
/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	/* Push the new value past the cache so non-coherent CPUs see it. */
	sync_cache_w(&pen_release);
}
21783014579SKukjin Kim 
/* Virtual address of the Snoop Control Unit (static mapping). */
static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}
22283014579SKukjin Kim 
/* Serialises the boot CPU and the incoming secondary during bringup. */
static DEFINE_SPINLOCK(boot_lock);
22483014579SKukjin Kim 
/*
 * Runs on the secondary CPU right after it enters the kernel: signal the
 * boot CPU that we left the holding pen, then wait until it drops
 * boot_lock before continuing.
 */
static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
23983014579SKukjin Kim 
2408bd26e3aSPaul Gortmaker static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
24183014579SKukjin Kim {
24283014579SKukjin Kim 	unsigned long timeout;
2439637f30eSTomasz Figa 	u32 mpidr = cpu_logical_map(cpu);
2449637f30eSTomasz Figa 	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
245b3205deaSSachin Kamat 	int ret = -ENOSYS;
24683014579SKukjin Kim 
24783014579SKukjin Kim 	/*
24883014579SKukjin Kim 	 * Set synchronisation state between this boot processor
24983014579SKukjin Kim 	 * and the secondary one
25083014579SKukjin Kim 	 */
25183014579SKukjin Kim 	spin_lock(&boot_lock);
25283014579SKukjin Kim 
25383014579SKukjin Kim 	/*
25483014579SKukjin Kim 	 * The secondary processor is waiting to be released from
25583014579SKukjin Kim 	 * the holding pen - release it, then wait for it to flag
25683014579SKukjin Kim 	 * that it has been released by resetting pen_release.
25783014579SKukjin Kim 	 *
2589637f30eSTomasz Figa 	 * Note that "pen_release" is the hardware CPU core ID, whereas
25983014579SKukjin Kim 	 * "cpu" is Linux's internal ID.
26083014579SKukjin Kim 	 */
2619637f30eSTomasz Figa 	write_pen_release(core_id);
26283014579SKukjin Kim 
2639637f30eSTomasz Figa 	if (!exynos_cpu_power_state(core_id)) {
2649637f30eSTomasz Figa 		exynos_cpu_power_up(core_id);
26583014579SKukjin Kim 		timeout = 10;
26683014579SKukjin Kim 
26783014579SKukjin Kim 		/* wait max 10 ms until cpu1 is on */
2689637f30eSTomasz Figa 		while (exynos_cpu_power_state(core_id)
2699637f30eSTomasz Figa 		       != S5P_CORE_LOCAL_PWR_EN) {
27083014579SKukjin Kim 			if (timeout-- == 0)
27183014579SKukjin Kim 				break;
27283014579SKukjin Kim 
27383014579SKukjin Kim 			mdelay(1);
27483014579SKukjin Kim 		}
27583014579SKukjin Kim 
27683014579SKukjin Kim 		if (timeout == 0) {
27783014579SKukjin Kim 			printk(KERN_ERR "cpu1 power enable failed");
27883014579SKukjin Kim 			spin_unlock(&boot_lock);
27983014579SKukjin Kim 			return -ETIMEDOUT;
28083014579SKukjin Kim 		}
28183014579SKukjin Kim 	}
28283014579SKukjin Kim 	/*
28383014579SKukjin Kim 	 * Send the secondary CPU a soft interrupt, thereby causing
28483014579SKukjin Kim 	 * the boot monitor to read the system wide flags register,
28583014579SKukjin Kim 	 * and branch to the address found there.
28683014579SKukjin Kim 	 */
28783014579SKukjin Kim 
28883014579SKukjin Kim 	timeout = jiffies + (1 * HZ);
28983014579SKukjin Kim 	while (time_before(jiffies, timeout)) {
290beddf63fSTomasz Figa 		unsigned long boot_addr;
291beddf63fSTomasz Figa 
29283014579SKukjin Kim 		smp_rmb();
29383014579SKukjin Kim 
294beddf63fSTomasz Figa 		boot_addr = virt_to_phys(exynos4_secondary_startup);
295beddf63fSTomasz Figa 
296beddf63fSTomasz Figa 		/*
297beddf63fSTomasz Figa 		 * Try to set boot address using firmware first
298beddf63fSTomasz Figa 		 * and fall back to boot register if it fails.
299beddf63fSTomasz Figa 		 */
3009637f30eSTomasz Figa 		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
301b3205deaSSachin Kamat 		if (ret && ret != -ENOSYS)
302b3205deaSSachin Kamat 			goto fail;
303b3205deaSSachin Kamat 		if (ret == -ENOSYS) {
3049637f30eSTomasz Figa 			void __iomem *boot_reg = cpu_boot_reg(core_id);
305b3205deaSSachin Kamat 
306b3205deaSSachin Kamat 			if (IS_ERR(boot_reg)) {
307b3205deaSSachin Kamat 				ret = PTR_ERR(boot_reg);
308b3205deaSSachin Kamat 				goto fail;
309b3205deaSSachin Kamat 			}
31068ba947cSKrzysztof Kozlowski 			__raw_writel(boot_addr, boot_reg);
311b3205deaSSachin Kamat 		}
312beddf63fSTomasz Figa 
3139637f30eSTomasz Figa 		call_firmware_op(cpu_boot, core_id);
314beddf63fSTomasz Figa 
315b1cffebfSRob Herring 		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
31683014579SKukjin Kim 
31783014579SKukjin Kim 		if (pen_release == -1)
31883014579SKukjin Kim 			break;
31983014579SKukjin Kim 
32083014579SKukjin Kim 		udelay(10);
32183014579SKukjin Kim 	}
32283014579SKukjin Kim 
323*13cfa6c4SKrzysztof Kozlowski 	/* No harm if this is called during first boot of secondary CPU */
324*13cfa6c4SKrzysztof Kozlowski 	exynos_set_delayed_reset_assertion(core_id, false);
325*13cfa6c4SKrzysztof Kozlowski 
32683014579SKukjin Kim 	/*
32783014579SKukjin Kim 	 * now the secondary core is starting up let it run its
32883014579SKukjin Kim 	 * calibrations, then wait for it to finish
32983014579SKukjin Kim 	 */
330b3205deaSSachin Kamat fail:
33183014579SKukjin Kim 	spin_unlock(&boot_lock);
33283014579SKukjin Kim 
333b3205deaSSachin Kamat 	return pen_release != -1 ? ret : 0;
33483014579SKukjin Kim }
33583014579SKukjin Kim 
33683014579SKukjin Kim /*
33783014579SKukjin Kim  * Initialise the CPU possible map early - this describes the CPUs
33883014579SKukjin Kim  * which may be present or become present in the system.
33983014579SKukjin Kim  */
34083014579SKukjin Kim 
34106853ae4SMarc Zyngier static void __init exynos_smp_init_cpus(void)
34283014579SKukjin Kim {
34383014579SKukjin Kim 	void __iomem *scu_base = scu_base_addr();
34483014579SKukjin Kim 	unsigned int i, ncores;
34583014579SKukjin Kim 
346af040ffcSRussell King 	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
34783014579SKukjin Kim 		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
3481897d2f3SChander Kashyap 	else
3491897d2f3SChander Kashyap 		/*
3501897d2f3SChander Kashyap 		 * CPU Nodes are passed thru DT and set_cpu_possible
3511897d2f3SChander Kashyap 		 * is set by "arm_dt_init_cpu_maps".
3521897d2f3SChander Kashyap 		 */
3531897d2f3SChander Kashyap 		return;
35483014579SKukjin Kim 
35583014579SKukjin Kim 	/* sanity check */
35683014579SKukjin Kim 	if (ncores > nr_cpu_ids) {
35783014579SKukjin Kim 		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
35883014579SKukjin Kim 			ncores, nr_cpu_ids);
35983014579SKukjin Kim 		ncores = nr_cpu_ids;
36083014579SKukjin Kim 	}
36183014579SKukjin Kim 
36283014579SKukjin Kim 	for (i = 0; i < ncores; i++)
36383014579SKukjin Kim 		set_cpu_possible(i, true);
36483014579SKukjin Kim }
36583014579SKukjin Kim 
/*
 * Prepare all secondary CPUs for booting: enable the SCU on Cortex-A9
 * parts and publish the secondary startup address for each CPU.
 */
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* Sets up the SYSRAM mapping used by cpu_boot_reg_base(). */
	exynos_sysram_init();

	/* Only Cortex-A9 based SoCs have an SCU to enable. */
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
		u32 mpidr;
		u32 core_id;
		int ret;

		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			break;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			/* No usable boot register on this SoC - give up. */
			if (IS_ERR(boot_reg))
				break;
			__raw_writel(boot_addr, boot_reg);
		}
	}
}
40606853ae4SMarc Zyngier 
4076f0b7c0cSKrzysztof Kozlowski #ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	/* Flush and disable our caches before dropping out of coherency. */
	v7_exit_coherency_flush(louis);

	/* Spin in power-down/WFI until the boot CPU releases us. */
	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
4326f0b7c0cSKrzysztof Kozlowski #endif /* CONFIG_HOTPLUG_CPU */
4336f0b7c0cSKrzysztof Kozlowski 
/* SMP callbacks hooked up from the Exynos machine descriptor. */
struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};
443